code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from builtins import str, bytes
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
    """
    Lower CMS headers in provided header's dict. The WMCore Authentication
    code check only cms headers in lower case, e.g. cms-xxx-yyy.

    :param headers: dictionary of HTTP header name -> value
    :return: new dictionary where keys beginning with 'Cms-'/'CMS-' are lowered
    """
    # only the CMS-specific header keys are lower-cased; others pass through
    return {(hkey.lower() if hkey.startswith(('Cms-', 'CMS-')) else hkey): hval
            for hkey, hval in headers.items()}
def makeList(stringList):
    """
    _makeList_
    Make a python list out of a comma separated list of strings,
    throws a ValueError if the input is not well formed.
    If the stringList is already of type list, then return it untouched.
    """
    if isinstance(stringList, list):
        return stringList
    if not isinstance(stringList, str):
        raise ValueError("Can't convert to list %s" % stringList)
    # drop surrounding brackets/spaces, then split on commas
    tokens = stringList.lstrip(' [').rstrip(' ]').split(',')
    if tokens == ['']:
        return []
    return [str(item.strip(' \'"')) for item in tokens]
def makeNonEmptyList(stringList):
    """
    _makeNonEmptyList_
    Given a string or a list of strings, return a non empty list of strings.
    Throws an exception in case the final list is empty or input data is not
    a string or a python list
    """
    result = makeList(stringList)
    if result:
        return result
    raise ValueError("Input data cannot be an empty list %s" % stringList)
def strToBool(string):
    """
    Try to convert different variations of True or False (including a string
    type object) to a boolean value.
    In short:
    * True gets mapped from: True, "True", "true", "TRUE".
    * False gets mapped from: False, "False", "false", "FALSE"
    * anything else will fail
    :param string: expects a boolean or a string, but it could be anything else
    :return: a boolean value, or raise an exception if value passed in is not supported
    """
    if isinstance(string, bool):
        return string
    mapping = {"True": True, "true": True, "TRUE": True,
               "False": False, "false": False, "FALSE": False}
    try:
        return mapping[string]
    except (KeyError, TypeError):
        # TypeError covers unhashable inputs such as lists
        raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
    """
    _safeStr_
    Cast simple data (int, float, basestring) to string.
    """
    # refuse containers: stringifying them is almost always a caller bug
    if isinstance(string, (tuple, list, set, dict)):
        raise ValueError("We're not supposed to convert %s to string." % string)
    return str(string)
def diskUse():
    """
    This returns the % use of each disk partition

    :return: list of dicts with 'mounted' (mount point) and 'percent' (usage)
    """
    usage = []
    proc = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    for line in decodeBytesToUnicode(stdout).split("\n"):
        fields = line.split()
        # skip blank lines and the "Filesystem ..." header row
        if fields and fields[0] != 'Filesystem':
            usage.append({'mounted': fields[5], 'percent': fields[4]})
    return usage
def numberCouchProcess():
    """
    This returns the number of couch process

    :return: integer count of 'couchjs' occurrences in the `ps -ef` output
    """
    proc = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    return decodeBytesToUnicode(stdout).count('couchjs')
def rootUrlJoin(base, extend):
    """
    Adds a path element to the path within a ROOT url

    :param base: url of the form root://<host>/<path>
    :param extend: path element to append
    :return: the extended url, or None when base is empty or not a ROOT url
    """
    if not base:
        return None
    match = re.match("^root://([^/]+)/(.+)", base)
    if not match:
        return None
    host, path = match.groups()
    return "root://%s/%s" % (host, os.path.join(path, extend))
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
    """
    _zipEncodeStr_
    Utility to zip a string and encode it.
    If zipped encoded length is greater than maxLen,
    truncate message until zip/encoded version
    is within the limits allowed.

    :param message: unicode or bytes string to compress
    :param maxLen: maximum allowed length of the zipped+encoded result;
        -1 disables the length check entirely
    :param steps: number of bytes chopped off per truncation round
    :param truncateIndicator: marker appended to a truncated message
    :return: zlib-compressed, base64-encoded bytes string
    """
    message = encodeUnicodeToBytes(message)
    encodedStr = zlib.compress(message, compressLevel)
    encodedStr = base64.b64encode(encodedStr)
    if len(encodedStr) < maxLen or maxLen == -1:
        return encodedStr
    # compression ratio of this message: compressed+encoded size over
    # encoded-only size; used to estimate how much input survives maxLen
    compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
    # Estimate new length for message zip/encoded version
    # to be less than maxLen.
    # Also, append truncate indicator to message.
    truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
    strLen = int((maxLen - len(truncateIndicator)) / compressRate)
    message = message[:strLen] + truncateIndicator
    # recurse with the check disabled just to compress/encode the candidate
    encodedStr = zipEncodeStr(message, maxLen=-1)
    # If new length is not short enough, truncate
    # recursively by steps
    while len(encodedStr) > maxLen:
        message = message[:-steps - len(truncateIndicator)] + truncateIndicator
        encodedStr = zipEncodeStr(message, maxLen=-1)
    return encodedStr
def getSize(obj):
    """
    _getSize_
    Function to traverse an object and calculate its total size in bytes
    :param obj: a python object
    :return: an integer representing the total size of the object
    Code extracted from Stack Overflow:
    https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
    """
    # Custom objects know their class.
    # Function objects seem to know way too much, including modules.
    # Exclude modules as well.
    BLACKLIST = type, ModuleType, FunctionType
    if isinstance(obj, BLACKLIST):
        raise TypeError('getSize() does not take argument of type: '+ str(type(obj)))
    visited = set()   # ids already accounted for, so shared refs count once
    total = 0
    frontier = [obj]  # breadth-first traversal of the reference graph
    while frontier:
        batch = []
        for item in frontier:
            if isinstance(item, BLACKLIST) or id(item) in visited:
                continue
            visited.add(id(item))
            total += sys.getsizeof(item)
            batch.append(item)
        frontier = get_referents(*batch)
    return total
def decodeBytesToUnicode(value, errors="strict"):
    """
    Decode *value* from a sequence of bytes to a sequence of unicode
    codepoints, when it is a bytes string; any other type is returned
    untouched.

    This supports the "unicode sandwich" approach: decode bytes to unicode
    as soon as possible on input, and encode back to bytes as late as
    possible on output (encoding back is NOT covered by this function, and
    is not always necessary).
    Reference: https://nedbatchelder.com/text/unipain.html

    :param value: any object; only `bytes` instances are decoded
    :param errors: decoding error policy, e.g. "strict", "ignore",
        "replace", "backslashreplace"
        (see https://docs.python.org/3/howto/unicode.html#the-string-type)
    :return: the decoded unicode string, or *value* unchanged
    """
    return value.decode("utf-8", errors) if isinstance(value, bytes) else value
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
    """
    if *condition*, then call decodeBytesToUnicode(*value*, *errors*),
    else return *value*
    This may be useful when we want to conditionally apply decodeBytesToUnicode,
    maintaining brevity.

    Parameters
    ----------
    value : any
        passed to decodeBytesToUnicode
    errors: str
        passed to decodeBytesToUnicode
    condition: boolean of object with attribute __bool__()
        if True, then we run decodeBytesToUnicode. Usually PY2/PY3
    """
    return decodeBytesToUnicode(value, errors) if condition else value
def encodeUnicodeToBytes(value, errors="strict"):
    """
    Encode *value* from a sequence of unicode codepoints to a sequence of
    bytes, when it is a unicode string; any other type is returned untouched.

    This supports the "unicode sandwich" approach: encode unicode back to
    bytes as late as possible, typically right before handing a string to a
    third-party API that only accepts bytes (pycurl's curl.setopt is an
    example).

    :param value: any object; only `str` instances are encoded
    :param errors: encoding error policy, e.g. "strict", "ignore", "replace",
        "backslashreplace", "xmlcharrefreplace", "namereplace"
        (see https://docs.python.org/3/howto/unicode.html#the-string-type)
    :return: the UTF-8 encoded bytes string, or *value* unchanged
    """
    return value.encode("utf-8", errors) if isinstance(value, str) else value
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
    """
    if *condition*, then call encodeUnicodeToBytes(*value*, *errors*),
    else return *value*
    This may be useful when we want to conditionally apply encodeUnicodeToBytes,
    maintaining brevity.

    Parameters
    ----------
    value : any
        passed to encodeUnicodeToBytes
    errors: str
        passed to encodeUnicodeToBytes
    condition: boolean of object with attribute __bool__()
        if True, then we run encodeUnicodeToBytes. Usually PY2/PY3
    """
    return encodeUnicodeToBytes(value, errors) if condition else value
import json
import urllib
from urllib.parse import urlparse, parse_qs, quote_plus
from collections import defaultdict
from Utils.CertTools import cert, ckey
from dbs.apis.dbsClient import aggFileLumis, aggFileParents
from WMCore.Services.pycurl_manager import getdata as multi_getdata
from Utils.PortForward import PortForward
def dbsListFileParents(dbsUrl, blocks):
    """
    Concurrent counter part of DBS listFileParents API
    :param dbsUrl: DBS URL
    :param blocks: list of blocks
    :return: list of file parents
    """
    # one fileparents call per block, fired concurrently by getUrls
    urls = ['%s/fileparents?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, aggFileParents, 'block_name')
def dbsListFileLumis(dbsUrl, blocks):
    """
    Concurrent counter part of DBS listFileLumis API
    :param dbsUrl: DBS URL
    :param blocks: list of blocks
    :return: list of file lumis
    """
    # one filelumis call per block, fired concurrently by getUrls
    urls = ['%s/filelumis?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, aggFileLumis, 'block_name')
def dbsBlockOrigin(dbsUrl, blocks):
    """
    Concurrent counter part of DBS files API
    :param dbsUrl: DBS URL
    :param blocks: list of blocks
    :return: list of block origins for a given parent lfns
    """
    # no aggregation function: raw decoded results are returned per block
    urls = ['%s/blockorigin?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, None, 'block_name')
def dbsParentFilesGivenParentDataset(dbsUrl, parentDataset, fInfo):
    """
    Obtain parent files for given fileInfo object
    :param dbsUrl: DBS URL
    :param parentDataset: parent dataset name
    :param fInfo: list of file info dicts, each with 'run_num',
        'lumi_section_num' and 'logical_file_name' keys
    :return: dict mapping each child lfn to the set of its parent lfns
    """
    portForwarder = PortForward(8443)

    def _buildUrl(fileInfo):
        # Build the (port-forwarded) DBS "files" URL for one file's
        # run/lumi pair; used both for firing the queries and for looking
        # the results back up, so it must be constructed identically.
        run = fileInfo['run_num']
        lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
        return portForwarder(f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}')

    urls = [_buildUrl(fileInfo) for fileInfo in fInfo]
    rdict = getUrls(urls, None, None)
    parentFiles = defaultdict(set)
    for fileInfo in fInfo:
        url = _buildUrl(fileInfo)
        if url in rdict:
            pFiles = {x['logical_file_name'] for x in rdict[url]}
            parentFiles[fileInfo['logical_file_name']] = \
                parentFiles[fileInfo['logical_file_name']].union(pFiles)
    return parentFiles
def getUrls(urls, aggFunc, uKey=None):
    """
    Perform parallel DBS calls for given set of urls and apply given aggregation
    function to the results.
    :param urls: list of DBS urls to call
    :param aggFunc: aggregation function applied to each decoded payload,
        or None to store the payload as-is
    :param uKey: url query parameter whose value is used as the key of the
        final dictionary; when None the full url is the key
    :return: dictionary of results where keys are urls (or uKey values) and
        values are obtained results
    :raises RuntimeError: when any of the HTTP calls returns a non-200 code
    """
    # NOTE: the original code re-used the name `data` for both the
    # multi_getdata result and each row's payload; distinct names avoid
    # accidental shadowing.
    results = multi_getdata(urls, ckey(), cert())
    rdict = {}
    for row in results:
        url = row['url']
        code = int(row.get('code', 200))
        error = row.get('error')
        if code != 200:
            msg = f"Fail to query {url}. Error: {code} {error}"
            raise RuntimeError(msg)
        if uKey:
            key = urlParams(url).get(uKey)
        else:
            key = url
        payload = json.loads(row.get('data', []))
        rdict[key] = aggFunc(payload) if aggFunc else payload
    return rdict
def urlParams(url):
    """
    Return dictionary of URL parameters
    :param url: URL link
    :return: dictionary of URL parameters
    """
    # parse_qs yields a list per key; collapse single-valued entries to
    # the bare value for convenience
    params = parse_qs(urlparse(url).query)
    return {key: (vals[0] if len(vals) == 1 else vals)
            for key, vals in params.items()}
from __future__ import (division, print_function)
from builtins import str, bytes
from Utils.Utilities import encodeUnicodeToBytes
from io import BytesIO
import re
# xml.etree.cElementTree was deprecated since Python 3.3 and removed in 3.9;
# plain ElementTree transparently uses the C accelerator when available.
import xml.etree.ElementTree as ET
# patterns recognising plain integers and floating point literals
int_number_pattern = re.compile(r'(^[0-9-]$|^[0-9-][0-9]*$)')
float_number_pattern = re.compile(r'(^[-]?\d+\.\d*$|^\d*\.{1,1}\d+$)')
def adjust_value(value):
    """
    Change null value to None.

    Non-string inputs are returned unchanged; strings that look numeric
    are converted to float or int, and 'null'/'(null)' become None.
    """
    if not isinstance(value, str):
        return value
    if value in ('null', '(null)'):
        return None
    # float is tested first so that '1.5' is not truncated to an int
    if float_number_pattern.match(value):
        return float(value)
    if int_number_pattern.match(value):
        return int(value)
    return value
def xml_parser(data, prim_key):
    """
    Generic XML parser
    :param data: can be of type "file object", unicode string or bytes string
    :param prim_key: tag name; one dict is yielded per element with this tag
    """
    # normalise strings into a seekable bytes stream for iterparse
    if isinstance(data, (str, bytes)):
        stream = BytesIO(encodeUnicodeToBytes(data, "ignore"))
    else:
        stream = data
    for event, elem in ET.iterparse(stream):
        if elem.tag != prim_key:
            continue
        row = {elem.tag: elem.attrib}
        get_children(elem, event, row, elem.tag)
        elem.clear()
        yield row
def get_children(elem, event, row, key):
    """
    xml_parser helper function. It gets recursively information about
    children for given element tag. Information is stored into provided
    row for given key. The change of notations can be applied during
    parsing step by using provided notations dictionary.

    :param elem: the ElementTree element being inspected
    :param event: iterparse event (passed through, unused here)
    :param row: dictionary being filled in place
    :param key: key of *row* under which child data is stored
    """
    # Element.getchildren() was deprecated and removed in Python 3.9;
    # iterating the element and testing len() is the supported replacement.
    for child in elem:
        child_key = child.tag
        child_data = child.attrib
        if not child_data:
            # no attributes: use the (type-adjusted) text content instead
            child_dict = adjust_value(child.text)
        else:
            child_dict = child_data
        if len(child):  # we got grand-children
            if child_dict:
                row[key][child_key] = child_dict
            else:
                row[key][child_key] = {}
            if isinstance(child_dict, dict):
                newdict = {child_key: child_dict}
            else:
                newdict = {child_key: {}}
            get_children(child, event, newdict, child_key)
            row[key][child_key] = newdict[child_key]
        else:
            # leaf child: accumulate repeated tags into a list
            if not isinstance(row[key], dict):
                row[key] = {}
            row[key].setdefault(child_key, [])
            row[key][child_key].append(child_dict)
        child.clear()
from __future__ import division
from builtins import object
from datetime import timedelta, datetime
import socket
import json
import logging
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Timers import LocalTimezone
class AlertManagerAPI(object):
    """
    A class used to send alerts via the MONIT AlertManager API
    """

    def __init__(self, alertManagerUrl, logger=None):
        self.alertManagerUrl = alertManagerUrl
        # sender's hostname is added as an annotation
        self.hostname = socket.gethostname()
        self.mgr = RequestHandler()
        self.ltz = LocalTimezone()
        self.headers = {"Content-Type": "application/json"}
        self.validSeverity = ["high", "medium", "low"]
        self.logger = logger if logger else logging.getLogger()

    def sendAlert(self, alertName, severity, summary, description, service, tag="wmcore", endSecs=600, generatorURL=""):
        """
        Push one alert to the AlertManager endpoint.

        :param alertName: a unique name for the alert
        :param severity: one of "high", "medium", "low"
        :param summary: a short description of the alert
        :param description: a longer informational message with details about the alert
        :param service: the name of the service firing an alert
        :param tag: a unique tag used to help route the alert
        :param endSecs: number of seconds until the alert is silenced
        :param generatorURL: sent to AlertManager and shown as a clickable
            "Source" link in the web interface
        :return: False when the severity is invalid, otherwise the result of
            the HTTP POST to AlertManager
        AlertManager JSON format reference:
        https://www.prometheus.io/docs/alerting/latest/clients/
        """
        if not self._isValidSeverity(severity):
            return False
        # In python3 we won't need the LocalTimezone class
        # Will change to: datetime.now().astimezone() + timedelta(seconds=endSecs)
        expiry = datetime.now(self.ltz) + timedelta(seconds=endSecs)
        alert = {
            "labels": {"alertname": alertName,
                       "severity": severity,
                       "tag": tag,
                       "service": service},
            "annotations": {"hostname": self.hostname,
                            "summary": summary,
                            "description": description},
            "endsAt": expiry.isoformat("T"),
            "generatorURL": generatorURL,
        }
        # pycurl_manager only accepts dicts and encoded strings, hence the dump
        payload = json.dumps([alert])
        return self.mgr.getdata(self.alertManagerUrl, params=payload,
                                headers=self.headers, verb='POST')

    def _isValidSeverity(self, severity):
        """
        Used to check if the severity of the alert matches the valid levels: low, medium, high
        :param severity: severity of the alert
        :return: True or False
        """
        if severity in self.validSeverity:
            return True
        logging.critical("Alert submitted to AlertManagerAPI with invalid severity: %s", severity)
        return False
from __future__ import print_function
from builtins import str, bytes, int
from future.utils import viewitems
from Utils.PythonVersion import PY2
import sys
import types
class _EmptyClass(object):
    # Bare placeholder used by JSONThunker._unthunk: an instance is created
    # without running any constructor, then its __class__ is reassigned to the
    # target type (the same trick the pickle module uses).
    pass
class JSONThunker(object):
    """
    _JSONThunker_
    Converts an arbitrary object to <-> from a jsonable object.
    Will, for the most part "do the right thing" about various instance objects
    by storing their class information along with their data in a dict. Handles
    a recursion limit to prevent infinite recursion.
    self.passThroughTypes - stores a list of types that should be passed
    through unchanged to the JSON parser
    self.blackListedModules - a list of modules that should not be stored in
    the JSON.
    """

    def __init__(self):
        self.passThroughTypes = (type(None),
                                 bool,
                                 int,
                                 float,
                                 complex,
                                 str,
                                 bytes,
                                 )
        # objects that inherit from dict should be treated as a dict
        # they don't store their data in __dict__. There was enough
        # of those classes that it warrented making a special case
        self.dictSortOfObjects = (('WMCore.Datastructs.Job', 'Job'),
                                  ('WMCore.WMBS.Job', 'Job'),
                                  ('WMCore.Database.CMSCouch', 'Document'))
        # ditto above, but for lists
        self.listSortOfObjects = (('WMCore.DataStructs.JobPackage', 'JobPackage'),
                                  ('WMCore.WMBS.JobPackage', 'JobPackage'),)
        self.foundIDs = {}
        # modules we don't want JSONed
        self.blackListedModules = ('sqlalchemy.engine.threadlocal',
                                   'WMCore.Database.DBCore',
                                   'logging',
                                   'WMCore.DAOFactory',
                                   'WMCore.WMFactory',
                                   'WMFactory',
                                   'WMCore.Configuration',
                                   'WMCore.Database.Transaction',
                                   'threading',
                                   'datetime')

    def checkRecursion(self, data):
        """
        handles checking for infinite recursion
        """
        if id(data) in self.foundIDs:
            if self.foundIDs[id(data)] > 5:
                self.unrecurse(data)
                return "**RECURSION**"
            else:
                self.foundIDs[id(data)] += 1
                return data
        else:
            self.foundIDs[id(data)] = 1
            return data

    def unrecurse(self, data):
        """
        backs off the recursion counter if we're returning from _thunk
        """
        # FIX: was a bare `except:` -- narrowed to the only error the
        # dictionary decrement can raise
        try:
            self.foundIDs[id(data)] -= 1
        except KeyError:
            print("Could not find count for id %s of type %s data %s" % (id(data), type(data), data))
            raise

    def checkBlackListed(self, data):
        """
        checks to see if a given object is from a blacklisted module
        """
        try:
            # special case
            if data.__class__.__module__ == 'WMCore.Database.CMSCouch' and data.__class__.__name__ == 'Document':
                data.__class__ = type({})
                return data
            if data.__class__.__module__ in self.blackListedModules:
                return "Blacklisted JSON object: module %s, name %s, str() %s" % \
                       (data.__class__.__module__, data.__class__.__name__, str(data))
            else:
                return data
        except Exception:
            return data

    def thunk(self, toThunk):
        """
        Thunk - turns an arbitrary object into a JSONable object
        """
        self.foundIDs = {}
        data = self._thunk(toThunk)
        return data

    def unthunk(self, data):
        """
        unthunk - turns a previously 'thunked' object back into a python object
        """
        return self._unthunk(data)

    def handleSetThunk(self, toThunk):
        """Encode a set as {'type': 'set', 'set': [...]}."""
        toThunk = self.checkRecursion(toThunk)
        tempDict = {'thunker_encoded_json': True, 'type': 'set'}
        tempDict['set'] = self._thunk(list(toThunk))
        self.unrecurse(toThunk)
        return tempDict

    def handleListThunk(self, toThunk):
        """Thunk each element of a plain list in place."""
        toThunk = self.checkRecursion(toThunk)
        for k, v in enumerate(toThunk):
            toThunk[k] = self._thunk(v)
        self.unrecurse(toThunk)
        return toThunk

    def handleDictThunk(self, toThunk):
        """Thunk a plain dict; int/float keys get the '_i:'/'_f:' encoding."""
        toThunk = self.checkRecursion(toThunk)
        special = False
        tmpdict = {}
        for k, v in toThunk.items():
            # FIX: the original compared `type(k) == type(int)`, which is only
            # true when the key is itself a class (type(int) is `type`), so
            # numeric keys were never encoded and _unthunk could not restore
            # them. bool is excluded because it is a subclass of int.
            if isinstance(k, int) and not isinstance(k, bool):
                special = True
                tmpdict['_i:%s' % k] = self._thunk(v)
            elif isinstance(k, float):
                special = True
                tmpdict['_f:%s' % k] = self._thunk(v)
            else:
                tmpdict[k] = self._thunk(v)
        if special:
            toThunk['thunker_encoded_json'] = self._thunk(True)
            toThunk['type'] = self._thunk('dict')
            toThunk['dict'] = tmpdict
        else:
            toThunk.update(tmpdict)
        self.unrecurse(toThunk)
        return toThunk

    def handleObjectThunk(self, toThunk):
        """Thunk an arbitrary instance, dispatching on dict/list-likeness."""
        toThunk = self.checkRecursion(toThunk)
        toThunk = self.checkBlackListed(toThunk)
        if isinstance(toThunk, (str, bytes)):
            # things that got blacklisted
            return toThunk
        if hasattr(toThunk, '__to_json__'):
            # Use classes own json thunker
            toThunk2 = toThunk.__to_json__(self)
            self.unrecurse(toThunk)
            return toThunk2
        elif isinstance(toThunk, dict):
            toThunk2 = self.handleDictObjectThunk(toThunk)
            self.unrecurse(toThunk)
            return toThunk2
        elif isinstance(toThunk, list):
            # a mother thunking list
            toThunk2 = self.handleListObjectThunk(toThunk)
            self.unrecurse(toThunk)
            return toThunk2
        else:
            try:
                thunktype = '%s.%s' % (toThunk.__class__.__module__,
                                       toThunk.__class__.__name__)
                tempDict = {'thunker_encoded_json': True, 'type': thunktype}
                tempDict[thunktype] = self._thunk(toThunk.__dict__)
                self.unrecurse(toThunk)
                return tempDict
            except Exception as e:
                tempDict = {'json_thunk_exception_': "%s" % e}
                self.unrecurse(toThunk)
                return tempDict

    def handleDictObjectThunk(self, data):
        """Thunk a dict subclass: mapping items under the type key, attrs beside."""
        thunktype = '%s.%s' % (data.__class__.__module__,
                               data.__class__.__name__)
        tempDict = {'thunker_encoded_json': True,
                    'is_dict': True,
                    'type': thunktype,
                    thunktype: {}}
        for k, v in data.__dict__.items():
            tempDict[k] = self._thunk(v)
        for k, v in data.items():
            tempDict[thunktype][k] = self._thunk(v)
        return tempDict

    def handleDictObjectUnThunk(self, value, data):
        """Inverse of handleDictObjectThunk."""
        data.pop('thunker_encoded_json', False)
        data.pop('is_dict', False)
        thunktype = data.pop('type', False)
        for k, v in data.items():
            if k == thunktype:
                for k2, v2 in data[thunktype].items():
                    value[k2] = self._unthunk(v2)
            else:
                value.__dict__[k] = self._unthunk(v)
        return value

    def handleListObjectThunk(self, data):
        """Thunk a list subclass: elements under the type key, attrs beside."""
        thunktype = '%s.%s' % (data.__class__.__module__,
                               data.__class__.__name__)
        tempDict = {'thunker_encoded_json': True,
                    'is_list': True,
                    'type': thunktype,
                    thunktype: []}
        # FIX: the original appended to tempDict['thunktype'] (the literal
        # string key, which does not exist) instead of tempDict[thunktype].
        for item in data:
            tempDict[thunktype].append(self._thunk(item))
        for k, v in data.__dict__.items():
            tempDict[k] = self._thunk(v)
        return tempDict

    def handleListObjectUnThunk(self, value, data):
        """Inverse of handleListObjectThunk."""
        data.pop('thunker_encoded_json', False)
        data.pop('is_list', False)
        thunktype = data.pop('type')
        # FIX: the original iterated viewitems() over the element *list* and
        # overwrote value.__dict__ wholesale; restore elements and attributes
        # symmetrically with handleListObjectThunk instead.
        for item in data[thunktype]:
            value.append(self._unthunk(item))
        for k, v in data.items():
            if k == thunktype:
                continue
            value.__dict__[k] = self._unthunk(v)
        return value

    def _thunk(self, toThunk):
        """
        helper function for thunk, does the actual work
        """
        if isinstance(toThunk, self.passThroughTypes):
            return toThunk
        elif type(toThunk) is list:
            return self.handleListThunk(toThunk)
        elif type(toThunk) is dict:
            return self.handleDictThunk(toThunk)
        elif type(toThunk) is set:
            return self.handleSetThunk(toThunk)
        elif type(toThunk) is types.FunctionType:
            self.unrecurse(toThunk)
            return "function reference"
        elif isinstance(toThunk, object):
            return self.handleObjectThunk(toThunk)
        else:
            self.unrecurse(toThunk)
            raise RuntimeError(type(toThunk))

    def _unthunk(self, jsondata):
        """
        _unthunk - does the actual work for unthunk
        """
        # py2-only: plain str values were re-encoded to utf-8 bytes
        if sys.version_info[0] == 2 and type(jsondata) is str:
            return jsondata.encode("utf-8")
        if type(jsondata) is dict:
            if 'thunker_encoded_json' in jsondata:
                # we've got a live one...
                if jsondata['type'] == 'set':
                    newSet = set()
                    for i in self._unthunk(jsondata['set']):
                        newSet.add(self._unthunk(i))
                    return newSet
                if jsondata['type'] == 'dict':
                    # We have a "special" dict with numeric keys
                    data = {}
                    for k, v in jsondata['dict'].items():
                        tmp = self._unthunk(v)
                        if k.startswith('_i:'):
                            data[int(k.lstrip('_i:'))] = tmp
                        elif k.startswith('_f:'):
                            data[float(k.lstrip('_f:'))] = tmp
                        else:
                            data[k] = tmp
                    return data
                else:
                    # spawn up an instance.. good luck
                    # here be monsters
                    # inspired from python's pickle code
                    ourClass = self.getThunkedClass(jsondata)
                    value = _EmptyClass()
                    if hasattr(ourClass, '__from_json__'):
                        # Use classes own json loader
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = ourClass.__from_json__(value, jsondata, self)
                    elif 'thunker_encoded_json' in jsondata and 'is_dict' in jsondata:
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = self.handleDictObjectUnThunk(value, jsondata)
                    elif 'thunker_encoded_json' in jsondata:
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = self.handleListObjectUnThunk(value, jsondata)
                    else:
                        raise RuntimeError('Could not unthunk a class. Code to try was removed because it had errors.')
                    return value
            else:
                data = {}
                for k, v in jsondata.items():
                    data[k] = self._unthunk(v)
                return data
        else:
            return jsondata

    @staticmethod
    def getThunkedClass(jsondata):
        """
        Work out the class from it's thunked json representation
        """
        module = jsondata['type'].rsplit('.', 1)[0]
        name = jsondata['type'].rsplit('.', 1)[1]
        # FIX: the original compared the *string* name against the class
        # object, so this guard could never fire.
        if (module == 'WMCore.Services.Requests') and (name == 'JSONThunker'):
            raise RuntimeError("Attempted to unthunk a JSONThunker..")
        __import__(module)
        mod = sys.modules[module]
        ourClass = getattr(mod, name)
        return ourClass
from builtins import str as newstr
import random, cherrypy
class RESTError(Exception):
    """Base class for REST errors.

    .. attribute:: http_code

       Integer, HTTP status code for this error. Also emitted as X-Error-HTTP
       header value.

    .. attribute:: app_code

       Integer, application error code, emitted as X-REST-Status header.

    .. attribute:: message

       String, information about the error, emitted as X-Error-Detail header.
       Must not contain anything sensitive (in particular no unvalidated
       input or database data); normally a fixed label matching `app_code`.
       Being a HTTP header it cannot contain newlines and is truncated past
       200 characters.

    .. attribute:: info

       String, additional free-form detail beyond `message`, emitted as
       X-Error-Info header. Same content restrictions as `message`.

    .. attribute:: errid

       String, random unique identifier for this error, emitted as X-Error-ID
       header and written to the server logs, so a client-reported id can be
       grepped in the logs for the full context.

    .. attribute:: errobj

       The original exception object when this error wraps another failure.
       Logged server-side only; never returned to the HTTP client.

    .. attribute:: trace

       Origin of the exception as returned by :func:`format_exc`. Logged
       server-side only; never returned to the HTTP client.
    """
    http_code = None
    app_code = None
    message = None
    info = None
    errid = None
    errobj = None
    trace = None

    def __init__(self, info=None, errobj=None, trace=None):
        # 128 random bits rendered as a fixed-width 32-char hex string
        self.errid = "%032x" % random.randrange(1 << 128)
        self.errobj = errobj
        self.info = info
        self.trace = trace

    def __str__(self):
        def flat(obj):
            # keep the log line single-line: fold newlines inside reprs
            return repr(obj).replace("\n", " ~~ ")
        return "%s %s [HTTP %d, APP %d, MSG %s, INFO %s, ERR %s]" \
               % (self.__class__.__name__, self.errid, self.http_code,
                  self.app_code, flat(self.message), flat(self.info),
                  flat(self.errobj))
class NotAcceptable(RESTError):
    "Client did not specify format it accepts, or no compatible format was found."
    http_code = 406  # HTTP status returned to the client
    app_code = 201   # application code (X-REST-Status header)
    message = "Not acceptable"
class UnsupportedMethod(RESTError):
    "Client used HTTP request method which isn't supported for any API call."
    http_code = 405  # HTTP status returned to the client
    app_code = 202   # application code (X-REST-Status header)
    message = "Request method not supported"
class MethodWithoutQueryString(RESTError):
    "Client provided a query string which isn't acceptable for this request method."
    http_code = 405  # HTTP status returned to the client
    app_code = 203   # application code (X-REST-Status header)
    message = "Query arguments not supported for this request method"
class APIMethodMismatch(RESTError):
    """Both the API and HTTP request methods are supported, but not in that
    combination."""
    http_code = 405  # HTTP status returned to the client
    app_code = 204   # application code (X-REST-Status header)
    message = "API not supported for this request method"
class APINotSpecified(RESTError):
    "The request URL is missing API argument."
    http_code = 400  # HTTP status returned to the client
    app_code = 205   # application code (X-REST-Status header)
    message = "API not specified"
class NoSuchInstance(RESTError):
    """The request URL is missing instance argument or the specified instance
    does not exist."""
    http_code = 404  # HTTP status returned to the client
    app_code = 206   # application code (X-REST-Status header)
    message = "No such instance"
class APINotSupported(RESTError):
    "The request URL provides wrong API argument."
    http_code = 404  # HTTP status returned to the client
    app_code = 207   # application code (X-REST-Status header)
    message = "API not supported"
class DataCacheEmpty(RESTError):
    "The wmstats data cache has not be created."
    http_code = 503  # HTTP status returned to the client
    app_code = 208   # application code (X-REST-Status header)
    message = "DataCache is Empty"
class DatabaseError(RESTError):
    """Parent class for database-related errors.

    .. attribute:: lastsql

       A tuple of *(sql, binds, kwbinds),* where `sql` is the last SQL statement
       executed and `binds`, `kwbinds` are the bind values used with it. Any
       sensitive parts like passwords have already been censored from the `sql`
       string. Note that for massive requests `binds` or `kwbinds` can get large.
       These are logged out in the server logs when reporting the error, but no
       information about these are returned to the HTTP client.

    .. attribute:: instance

       String, the database instance for which the error occurred. This is
       reported in the error message output to server logs, but no information
       about this is returned to the HTTP client."""
    lastsql = None
    instance = None

    def __init__(self, info=None, errobj=None, trace=None,
                 lastsql=None, instance=None):
        # forward the generic details to RESTError, then record the
        # database-specific context (last SQL executed, instance name)
        RESTError.__init__(self, info, errobj, trace)
        self.lastsql = lastsql
        self.instance = instance
class DatabaseUnavailable(DatabaseError):
    """The instance argument is correct, but cannot connect to the database.
    This error will only occur at initial attempt to connect to the database,
    :class:`~.DatabaseConnectionError` is raised instead if the connection
    ends prematurely after the transaction has already begun successfully."""
    http_code = 503  # Service Unavailable
    app_code = 401   # application-level error code reported in X-REST-Status
    message = "Database unavailable"
class DatabaseConnectionError(DatabaseError):
    """Database was available when the operation started, but the connection
    was lost or otherwise failed during the application operation."""
    http_code = 504  # Gateway Timeout
    app_code = 402   # application-level error code reported in X-REST-Status
    message = "Database connection failure"
class DatabaseExecutionError(DatabaseError):
    """Database operation failed."""
    http_code = 500  # Internal Server Error
    # NOTE(review): app_code 403 and the message are identical to
    # ExecutionError below -- presumably intentional aliasing, but clients
    # cannot distinguish the two from headers alone; confirm.
    app_code = 403
    message = "Execution error"
class MissingParameter(RESTError):
    "Client did not supply a parameter which is required."
    http_code = 400  # Bad Request
    app_code = 301   # application-level error code reported in X-REST-Status
    message = "Missing required parameter"
class InvalidParameter(RESTError):
    "Client supplied invalid value for a parameter."
    http_code = 400  # Bad Request
    app_code = 302   # application-level error code reported in X-REST-Status
    message = "Invalid input parameter"
class MissingObject(RESTError):
    """An object required for the operation is missing. This might be a
    pre-requisite needed to create a reference, or attempt to delete
    an object which does not exist."""
    http_code = 400  # Bad Request
    app_code = 303   # application-level error code reported in X-REST-Status
    message = "Required object is missing"
class TooManyObjects(RESTError):
    """Too many objects matched specified criteria. Usually this means
    more than one object was matched, deleted, or inserted, when only
    exactly one should have been subject to the operation."""
    http_code = 400  # Bad Request
    app_code = 304   # application-level error code reported in X-REST-Status
    message = "Too many objects"
class ObjectAlreadyExists(RESTError):
    """An already existing object is on the way of the operation. This
    is usually caused by uniqueness constraint violations when creating
    new objects."""
    http_code = 400  # Bad Request
    app_code = 305   # application-level error code reported in X-REST-Status
    message = "Object already exists"
class InvalidObject(RESTError):
    "The specified object is invalid."
    http_code = 400  # Bad Request
    app_code = 306   # application-level error code reported in X-REST-Status
    message = "Invalid object"
class ExecutionError(RESTError):
    """Input was in principle correct but there was an error processing
    the request. This normally means either programming error, timeout, or
    an unusual and unexpected problem with the database. For security reasons
    little additional information is returned. If the problem persists, client
    should contact service operators. The returned error id can be used as a
    reference."""
    http_code = 500  # Internal Server Error
    # NOTE(review): shares app_code 403 and message with
    # DatabaseExecutionError above -- confirm this aliasing is intentional.
    app_code = 403
    message = "Execution error"
def report_error_header(header, val):
    """Set CherryPy response `header` to `val` if `val` is non-empty.

    Newlines in the value are collapsed to "; ". Values longer than 200
    characters are truncated to the first 197 characters plus a trailing
    ellipsis "...". Empty or None values leave the response untouched."""
    if not val:
        return
    sanitized = val.replace("\n", "; ")
    if len(sanitized) > 200:
        sanitized = sanitized[:197] + "..."
    cherrypy.response.headers[header] = sanitized
def report_rest_error(err, trace, throw):
    """Report a REST error: generate an appropriate log message, set the
    response headers and raise an appropriate :class:`~.HTTPError`.
    Normally `throw` would be True to translate the exception `err` into
    a HTTP server error, but the function can also be called with `throw`
    set to False if the purpose is merely to log an exception message.
    :arg err: exception object.
    :arg trace: stack trace to use in case `err` doesn't have one.
    :arg throw: raise a :class:`~.HTTPError` if True."""
    if isinstance(err, DatabaseError) and err.errobj:
        # Database error: log the censored SQL and binds, marking the
        # failure position with "<**>" when the driver reports an offset.
        offset = None
        sql, binds, kwbinds = err.lastsql
        if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
            offset = err.errobj.args[0].offset
            sql = sql[:offset] + "<**>" + sql[offset:]
        cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
                     " last statement: %s; binds: %s, %s; offset: %s"
                     % (err.http_code, err.app_code, err.message,
                        getattr(err.errobj, "__module__", "__builtins__"),
                        err.errobj.__class__.__name__,
                        err.errid, err.instance, newstr(err.errobj).rstrip(),
                        sql, binds, kwbinds, offset))
        # SQL details go only to server logs; clients get codes and id only.
        for line in err.trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, RESTError):
        # REST errors carry their own http/app codes and error id.
        if err.errobj:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message,
                            getattr(err.errobj, "__module__", "__builtins__"),
                            err.errobj.__class__.__name__,
                            newstr(err.errobj).rstrip()))
            # Prefer the trace captured at the original failure point.
            trace = err.trace
        else:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, cherrypy.HTTPError):
        # Plain CherryPy HTTP error: synthesize a random error id for logs.
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
                     % (err.__module__, err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(200)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", err._message)
        if throw: raise err
    else:
        # Unknown exception type: report a generic server error.
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
                     % (getattr(err, "__module__", "__builtins__"),
                        err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        # NOTE(review): X-REST-Status 400 vs X-Error-HTTP 500 look
        # inconsistent (and are set as ints, unlike newstr() above) --
        # confirm whether this asymmetry is intentional.
        cherrypy.response.headers["X-REST-Status"] = 400
        cherrypy.response.headers["X-Error-HTTP"] = 500
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", "Server error")
        if throw: raise cherrypy.HTTPError(500, "Server error")
from builtins import str as newstr, bytes as newbytes
from WMCore.REST.Error import *
import math
import re
import numbers
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3, PY2
def return_message(main_err, custom_err):
    """Return the caller-provided `custom_err` text when it is non-empty,
    otherwise fall back to the default `main_err` text."""
    return custom_err or main_err
def _arglist(argname, kwargs):
val = kwargs.get(argname, None)
if val == None:
return []
elif not isinstance(val, list):
return [ val ]
else:
return val
def _check_rx(argname, val, custom_err = None):
if not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
return re.compile(val)
except:
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
def _check_str(argname, val, rx, custom_err = None):
    """
    This is not really check val is ASCII.
    2021 09: we are now using version 17.4.0 -> we do not need to convert to
    bytes here anymore, we are using a recent verison of cherrypy.
    We merged the funcionality of _check_str and _check_ustr into a single function
    :type val: str or bytes (only utf8 encoded string) in py3, unicode or str in py2
    :type rx: regex, compiled from native str (unicode in py3, bytes in py2)
    """
    # Normalise `val` to the platform's native str: decode utf-8 bytes under
    # py3, encode unicode under py2. Each helper is a no-op when the
    # condition is False.
    val = decodeBytesToUnicodeConditional(val, condition=PY3)
    val = encodeUnicodeToBytesConditional(val, condition=PY2)
    # `val` should now be a "native str" (unicode in py3, bytes in py2)
    # here str has not been redefined. it is default `str` in both py2 and py3.
    if not isinstance(val, str) or not rx.match(val):
        raise InvalidParameter(return_message("Incorrect '%s' parameter %s %s" % (argname, type(val), val), custom_err))
    return val
def _check_num(argname, val, bare, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Integral) and (not isinstance(val, (newstr, newbytes)) or (bare and not val.isdigit())):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = int(val)
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _check_real(argname, val, special, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Number) and not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = float(val)
if not special and (math.isnan(n) or math.isinf(n)):
raise InvalidParameter(return_message("Parameter '%s' improper value" % argname, custom_err))
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _validate_one(argname, param, safe, checker, optional, *args):
val = param.kwargs.get(argname, None)
if optional and val == None:
safe.kwargs[argname] = None
else:
safe.kwargs[argname] = checker(argname, val, *args)
del param.kwargs[argname]
def _validate_all(argname, param, safe, checker, *args):
    """Validate every value of `argname` found in `param.kwargs`.

    Each raw value (normalised to a list by `_arglist`) is passed through
    `checker(argname, value, *args)`; the list of checked results is stored
    in `safe.kwargs[argname]` and the raw entry, if present, is removed
    from `param.kwargs`. The stored value is always a list, possibly empty."""
    raw_values = _arglist(argname, param.kwargs)
    safe.kwargs[argname] = [checker(argname, item, *args) for item in raw_values]
    param.kwargs.pop(argname, None)
def validate_rx(argname, param, safe, optional = False, custom_err = None):
    """Validates that an argument is a valid regexp.
    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is a string which compiles into a python regular expression.
    If successful, the regexp object (not the string) is copied into
    `safe.kwargs` and the string value is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # Delegate to the single-argument validator with the regexp checker.
    _validate_one(argname, param, safe, _check_rx, optional, custom_err)
def validate_str(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp.
    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.
    Accepts both unicode strings and utf8-encoded bytes strings as argument
    string.
    Accepts regex compiled only with "native strings", which means str in both
    py2 and py3 (unicode in py3, bytes of utf8-encoded strings in py2)
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # Delegate to the single-argument validator with the string checker.
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_ustr(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp,
    During the py2->py3 modernization, _check_str and _check_ustr have been
    merged into a single function called _check_str.
    This function is now the same as validate_str, but is kept nonetheless
    not to break our client's code.
    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # Kept as a backwards-compatible alias of validate_str (see above).
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_num(argname, param, safe, optional = False,
                 bare = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid integer number.
    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is an int or a string convertible to a valid number. If successful
    the integer value (not the string) is copied into `safe.kwargs`
    and the original int/string value is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.
    If `bare` is True, the number is required to be a pure digit sequence if it is a string.
    Otherwise anything accepted by `int(val)` is accepted, including for
    example leading white space or sign. Note that either way arbitrarily
    large values are accepted; if you want to prevent abuse against big
    integers, use the `minval` and `maxval` thresholds described below,
    or check the length of the string against some limit first.
    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    # Delegate to the single-argument validator with the integer checker.
    _validate_one(argname, param, safe, _check_num, optional, bare, minval, maxval, custom_err)
def validate_real(argname, param, safe, optional = False,
                  special = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid real number.
    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is float number or a string convertible to a valid number. If successful
    the float value (not the string) is copied into `safe.kwargs`
    and the original float/string value is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.
    Anything accepted by `float(val)` is accepted, including for example
    leading white space, sign and exponent. However NaN and +/- Inf are
    rejected unless `special` is True.
    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    # Delegate to the single-argument validator with the float checker.
    _validate_one(argname, param, safe, _check_real, optional, special, minval, maxval, custom_err)
def validate_rxlist(argname, param, safe, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    can be compiled into a python regexp object.
    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which compiles into a regular expression.
    If successful the array is copied into `safe.kwargs` and the value is
    removed from `param.kwargs`. The value always becomes an array in
    `safe.kwargs`, even if no or only one argument was provided.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the multi-argument validator with the regexp checker.
    _validate_all(argname, param, safe, _check_rx, custom_err)
def validate_strlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp.
    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.
    Use `validate_ustrlist` instead if the argument string might need
    to be converted from utf-8 into unicode first. Use this method only
    for inputs which are meant to be bare strings.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the multi-argument validator with the string checker.
    _validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_ustrlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp once converted from utf-8 into unicode.
    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.
    Use `validate_strlist` instead if the argument strings should always
    be bare strings. This one automatically converts everything into
    unicode and expects input exclusively in utf-8, which may not be
    appropriate constraints for some uses.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Bug fix: `_check_ustr` no longer exists -- it was merged into
    # `_check_str` during the py2->py3 modernization (see validate_ustr),
    # so referencing it raised NameError whenever this validator ran.
    _validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_numlist(argname, param, safe, bare=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of integers, as checked by
    `validate_num()`.
    Checks that an argument named `argname` is either a single string/int or
    an array of strings/int, each of which validates with `validate_num` and
    `bare`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the multi-argument validator with the integer checker.
    _validate_all(argname, param, safe, _check_num, bare, minval, maxval, custom_err)
def validate_reallist(argname, param, safe, special=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of real numbers, as checked by
    `validate_real()`.
    Checks that an argument named `argname` is either a single string/float or
    an array of strings/floats, each of which validates with `validate_real` and
    `special`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the multi-argument validator with the float checker.
    _validate_all(argname, param, safe, _check_real, special, minval, maxval, custom_err)
def validate_no_more_input(param):
    """Verifies no more input is left in `param.args` or `param.kwargs`."""
    leftover_checks = (
        (param.args, "Excess path arguments, not validated args='%s'"),
        (param.kwargs, "Excess keyword arguments, not validated kwargs='%s'"),
    )
    for leftover, template in leftover_checks:
        if leftover:
            raise InvalidParameter(template % leftover)
def validate_lengths(safe, *names):
    """Verifies that all `names` exist in `safe.kwargs`, are lists, and
    all the lists have the same length. This is convenience function for
    checking that an API accepting multiple values receives equal number
    of values for all of its parameters."""
    reflen = None
    for name in names:
        # A missing key yields None, which fails the isinstance check,
        # so absence and wrong type are reported with the same message.
        value = safe.kwargs.get(name)
        if not isinstance(value, list):
            raise InvalidParameter("Incorrect '%s' parameter" % name)
        if reflen is None:
            # First name sets the reference length.
            reflen = len(value)
        elif len(value) != reflen:
            raise InvalidParameter("Mismatched number of arguments: %d %s vs. %d %s"
                                   % (reflen, names[0], len(value), name))
from __future__ import print_function
import gzip
from builtins import str, bytes, object
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytes, encodeUnicodeToBytesConditional
from future.utils import viewitems
import hashlib
import json
import xml.sax.saxutils
import zlib
from traceback import format_exc
import cherrypy
from WMCore.REST.Error import RESTError, ExecutionError, report_rest_error
try:
from cherrypy.lib import httputil
except ImportError:
from cherrypy.lib import http as httputil
def vary_by(header):
    """Append `header` to the response's 'Vary' header if not yet listed."""
    current = cherrypy.response.headers.get('Vary', '')
    entries = [item.strip() for item in current.split(",") if item.strip()]
    if header not in entries:
        entries.append(header)
    cherrypy.response.headers['Vary'] = ", ".join(entries)
def is_iterable(obj):
    """Check if `obj` is iterable."""
    try:
        # iter() raises TypeError for non-iterables; any other outcome
        # means the object supports iteration.
        iter(obj)
        return True
    except TypeError:
        return False
class RESTFormat(object):
    # Base class for output formatters; subclasses provide stream_chunked()
    # and usually override chunk_args().
    def __call__(self, stream, etag):
        """Main entry point for generating output for `stream` using `etag`
        object to generate ETag header. Returns a generator function for
        producing a verbatim copy of `stream` item, including any preambles
        and trailers needed for the selected format. The intention is that
        the caller will use the iterable to generate chunked HTTP transfer
        encoding, or a simple result such as an image."""
        # Make 'stream' iterable. We convert everything to chunks here.
        # The final stream consumer will collapse small responses back
        # to a single string. Convert files to 1MB chunks.
        if stream is None:
            stream = ['']
        elif isinstance(stream, (str, bytes)):
            stream = [stream]
        elif hasattr(stream, "read"):
            # types.FileType is not available anymore in python3,
            # using it raises pylint W1624.
            # Since cherrypy.lib.file_generator only uses the .read() attribute
            # of a file, we simply check if stream.read() is present instead.
            # https://github.com/cherrypy/cherrypy/blob/2a8aaccd649eb1011382c39f5cd93f76f980c0b1/cherrypy/lib/__init__.py#L64
            stream = cherrypy.lib.file_generator(stream, 512 * 1024)
        return self.stream_chunked(stream, etag, *self.chunk_args(stream))

    def chunk_args(self, stream):
        """Return extra arguments needed for `stream_chunked()`. The default
        return an empty tuple, so no extra arguments. Override in the derived
        class if `stream_chunked()` needs preamble or trailer arguments."""
        return tuple()
class XMLFormat(RESTFormat):
    """Format an iterable of objects into XML encoded in UTF-8.
    Generates normally first a preamble, a stream of XML-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.
    Outputs first a preamble, then XML encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.
    The ETag generation is deterministic only if iterating over input is
    deterministic. Beware in particular the key order for a dict is
    arbitrary and may differ for two semantically identical dicts.
    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.
    The output is generated as an XML document whose top-level entity name
    is defined by the label given at the formatter construction time. The
    caller must define ``cherrypy.request.rest_generate_data`` to element
    name for wrapping stream contents. Usually the top-level entity is the
    application name and the ``cherrypy.request.rest_generate_data`` is
    ``result``.
    Iterables are output as ``<array><i>ITEM</i><i>ITEM</i></array>``,
    dictionaries as ``<dict><key>KEY</key><value>VALUE</value></dict>``.
    `None` is output as empty contents, and hence there is no way to
    distinguish `None` and an empty string from each other. Scalar types
    are output as rendered by `str()`, but obviously XML encoding unsafe
    characters. This class does not support formatting arbitrary types.
    The formatter does not insert any spaces into the output. Although the
    output is generated as a preamble, stream of objects, and trailer just
    like by the `JSONFormatter`, each of which is a separate HTTP transfer
    chunk, the output does *not* have guaranteed line-oriented structure
    like the `JSONFormatter` produces. Note in particular that if the data
    stream contains strings with newlines, the output will have arbitrary
    line structure. On the other hand, as the output is well-formed XML,
    virtually all SAX processors can read the stream incrementally even if
    the client isn't able to fully preserve chunked HTTP transfer encoding."""
    def __init__(self, label):
        # `label` becomes the name of the top-level XML entity.
        self.label = label

    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into XML."""
        if isinstance(obj, type(None)):
            result = ""
        elif isinstance(obj, str):
            # NOTE(review): under py3 .encode("utf-8") yields bytes which,
            # when %s-interpolated by callers, renders as "b'...'" -- this
            # looks like a py2 leftover; confirm intended py3 behaviour.
            result = xml.sax.saxutils.escape(obj).encode("utf-8")
        elif isinstance(obj, bytes):
            result = xml.sax.saxutils.escape(obj)
        elif isinstance(obj, (int, float, bool)):
            result = xml.sax.saxutils.escape(str(obj)).encode("utf-8")
        elif isinstance(obj, dict):
            result = "<dict>"
            for k, v in viewitems(obj):
                result += "<key>%s</key><value>%s</value>" % \
                          (xml.sax.saxutils.escape(k).encode("utf-8"),
                           XMLFormat.format_obj(v))
            result += "</dict>"
        elif is_iterable(obj):
            result = "<array>"
            for v in obj:
                result += "<i>%s</i>" % XMLFormat.format_obj(v)
            result += "</array>"
        else:
            # Unsupported type: log details server-side, report generic error.
            cherrypy.log("cannot represent object of type %s in xml (%s)"
                         % (type(obj).__class__.__name__, repr(obj)))
            raise ExecutionError("cannot represent object in xml")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = XMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # Consumer stopped early: invalidate the ETag and skip
                # the trailer so the truncation is detectable.
                etag.invalidate()
                trailer = None
                raise
            finally:
                # Emit the trailer even on stream errors to keep the
                # document well-formed.
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as XML reply."""
        preamble = "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
        preamble += "<%s>" % self.label
        if cherrypy.request.rest_generate_preamble:
            desc = self.format_obj(cherrypy.request.rest_generate_preamble)
            preamble += "<desc>%s</desc>" % desc
        preamble += "<%s>" % cherrypy.request.rest_generate_data
        trailer = "</%s></%s>" % (cherrypy.request.rest_generate_data, self.label)
        return preamble, trailer
class JSONFormat(RESTFormat):
    """Format an iterable of objects into JSON.
    Generates normally first a preamble, a stream of JSON-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.
    Outputs first a preamble, then JSON encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.
    The ETag generation is deterministic only if `cjson.encode()` output is
    deterministic for the input. Beware in particular the key order for a
    dict is arbitrary and may differ for two semantically identical dicts.
    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.
    The output is always generated as a JSON dictionary. The caller must
    define ``cherrypy.request.rest_generate_data`` as the key for actual
    contents, usually something like "result". The `stream` value will be
    generated as an array value for that key.
    If ``cherrypy.request.rest_generate_preamble`` is a non-empty list, it
    is output as the ``desc`` key value in the preamble before outputting
    the `stream` contents. Otherwise the output consists solely of `stream`.
    A common use of ``rest_generate_preamble`` is list of column labels
    with `stream` an iterable of lists of column values.
    The output is guaranteed to contain one line of preamble which starts a
    dictionary and an array ("``{key: [``"), one line of JSON rendering of
    each object in `stream`, with the first line starting with exactly one
    space and second and subsequent lines starting with a comma, and one
    final trailer line consisting of "``]}``". Each line is generated as a
    HTTP transfer chunk. This format is fixed so readers can be constructed
    to read and parse the stream incrementally one line at a time,
    facilitating maximum throughput processing of the response."""
    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        # First record starts with a space; subsequent ones with a comma,
        # giving the documented line-oriented "{key: [\n item\n,item..." shape.
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            # Keep a reference to the current object so serialization
            # failures can be logged with the offending value.
            obj = None
            try:
                for obj in stream:
                    chunk = comma + json.dumps(obj) + "\n"
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except cherrypy.HTTPError:
                raise
            except GeneratorExit:
                # Consumer stopped early: invalidate ETag, skip trailer.
                etag.invalidate()
                trailer = None
                raise
            except Exception as exp:
                print("ERROR, json.dumps failed to serialize %s, type %s\nException: %s" \
                      % (obj, type(obj), str(exp)))
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
            cherrypy.response.headers["X-REST-Status"] = 100
        except cherrypy.HTTPError:
            raise
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as JSON reply."""
        comma = ""
        preamble = "{"
        trailer = "]}\n"
        if cherrypy.request.rest_generate_preamble:
            desc = json.dumps(cherrypy.request.rest_generate_preamble)
            preamble += '"desc": %s' % desc
            comma = ", "
        preamble += '%s"%s": [\n' % (comma, cherrypy.request.rest_generate_data)
        return preamble, trailer
class PrettyJSONFormat(JSONFormat):
    """ Format used for human, (web browser)"""
    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output.

        Same protocol as JSONFormat.stream_chunked, but records are emitted
        with indent=2 for readability and without trailing newlines."""
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            try:
                for obj in stream:
                    # indent=2 pretty-prints each record for browsers.
                    chunk = comma + json.dumps(obj, indent=2)
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except GeneratorExit:
                # Consumer stopped early: invalidate ETag, skip trailer.
                etag.invalidate()
                trailer = None
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
            cherrypy.response.headers["X-REST-Status"] = 100
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
class PrettyJSONHTMLFormat(PrettyJSONFormat):
    """ Format used for human, (web browser) wrap around html tag on json"""
    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into HTML."""
        if isinstance(obj, type(None)):
            result = ""
        elif isinstance(obj, str):
            # quoteattr escapes and quotes the value; multi-line strings
            # are wrapped in <pre> to preserve their layout.
            obj = xml.sax.saxutils.quoteattr(obj)
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, bytes):
            obj = xml.sax.saxutils.quoteattr(str(obj, "utf-8"))
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, (int, float, bool)):
            result = "%s" % obj
        elif isinstance(obj, dict):
            result = "<ul>"
            for k, v in viewitems(obj):
                result += "<li><b>%s</b>: %s</li>" % (k, PrettyJSONHTMLFormat.format_obj(v))
            result += "</ul>"
        elif is_iterable(obj):
            # Empty iterables collapse to an empty string instead of an
            # empty <details> widget.
            empty = True
            result = "<details open><ul>"
            for v in obj:
                empty = False
                result += "<li>%s</li>" % PrettyJSONHTMLFormat.format_obj(v)
            result += "</ul></details>"
            if empty:
                result = ""
        else:
            # Unsupported type: log details server-side, report generic error.
            cherrypy.log("cannot represent object of type %s in xml (%s)"
                         % (type(obj).__class__.__name__, repr(obj)))
            raise ExecutionError("cannot represent object in xml")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = PrettyJSONHTMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # Consumer stopped early: invalidate ETag, skip trailer.
                etag.invalidate()
                trailer = None
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as HTML reply."""
        preamble = "<html><body>"
        trailer = "</body></html>"
        return preamble, trailer
class RawFormat(RESTFormat):
    """Format an iterable of objects as raw data.

    Generates raw data completely unmodified, for example image data or
    streaming arbitrary external data files including even plain text.
    Computes an ETag on the output in the process. The result is always
    chunked, even simple strings on input. Usually small enough responses
    will automatically be converted back to a single string response post
    compression and ETag processing.

    Any exceptions raised by input stream are reported to `report_rest_error`
    and swallowed, as this is normally used to generate output for CherryPy
    responses, which cannot handle exceptions reasonably after the output
    generation begins; later processing may reconvert those back to exceptions
    however (cf. stream_maybe_etag()). A X-REST-Status trailer header is added
    if (and only if) an exception occurs; the client must inspect that to find
    out if it got the complete output. There is normally 'X-REST-Status: 100'
    in normal response headers, and it remains valid in case of success.
    No ETag header is generated in case of an exception."""

    def stream_chunked(self, stream, etag):
        """Generator for actually producing the output: pass each chunk
        through unchanged while folding it into the ETag digest."""
        try:
            for chunk in stream:
                etag.update(chunk)
                yield chunk
        except RESTError as e:
            # Known REST-level failure: invalidate the ETag, report, swallow.
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            # Any other error is reported as a generic ExecutionError.
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
        except BaseException:
            # Non-Exception BaseExceptions (e.g. GeneratorExit when the
            # client disconnects): invalidate the ETag and propagate.
            etag.invalidate()
            raise
class DigestETag(object):
    """Compute a hash digest over response contents for the ETag header."""

    #: Default hash algorithm name; concrete subclasses override this.
    algorithm = None

    def __init__(self, algorithm=None):
        """Create the digest engine, using `algorithm` or the class default."""
        self.digest = hashlib.new(algorithm or self.algorithm)

    def update(self, val):
        """Fold response data `val` into the digest, unless invalidated."""
        if self.digest is not None:
            self.digest.update(encodeUnicodeToBytes(val))

    def value(self):
        """Return the quoted ETag header value, or None once invalidated."""
        if self.digest is None:
            return None
        return '"' + self.digest.hexdigest() + '"'

    def invalidate(self):
        """Disable the calculator; value() returns None afterwards."""
        self.digest = None
class MD5ETag(DigestETag):
    """Compute MD5 hash over contents for ETag header."""
    # Algorithm name handed to hashlib.new() by the DigestETag base class.
    algorithm = 'md5'
class SHA1ETag(DigestETag):
    """Compute SHA1 hash over contents for ETag header."""
    # Algorithm name handed to hashlib.new() by the DigestETag base class.
    algorithm = 'sha1'
def _stream_compress_identity(reply, *args):
"""Streaming compressor which returns original data unchanged."""
return reply
def _stream_compress_deflate(reply, compress_level, max_chunk):
    """Streaming compressor for the 'deflate' method. Generates output that
    is guaranteed to expand at the exact same chunk boundaries as original
    reply stream."""
    # Create zlib compression object, with raw data stream (negative window size)
    z = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS,
                         zlib.DEF_MEM_LEVEL, 0)
    # Data pending compression. We only take entire chunks from original
    # reply. Then process reply one chunk at a time. Whenever we have enough
    # data to compress, spit it out flushing the zlib engine entirely, so we
    # respect original chunk boundaries.
    npending = 0
    pending = []
    for chunk in reply:
        pending.append(chunk)
        npending += len(chunk)
        if npending >= max_chunk:
            # Z_FULL_FLUSH emits a byte-aligned block, so each yielded part
            # can be inflated without waiting for later input.
            part = z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FULL_FLUSH)
            pending = []
            npending = 0
            yield part
    # Crank the compressor one more time for remaining output.
    # NOTE(review): if the reply ends exactly on a flushed boundary
    # (npending == 0 here) Z_FINISH is never issued, so the deflate stream
    # is not formally terminated; most inflaters tolerate this, but confirm
    # before relying on strict decoders.
    if npending:
        yield z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FINISH)
def _stream_compress_gzip(reply, compress_level, *args):
"""Streaming compressor for the 'gzip' method. Generates output that
is guaranteed to expand at the exact same chunk boundaries as original
reply stream."""
data = []
for chunk in reply:
data.append(chunk)
if data:
yield gzip.compress(encodeUnicodeToBytes("".join(data)), compress_level)
#: Stream compression methods, keyed by the Accept-Encoding value they serve.
_stream_compressor = {
    'identity': _stream_compress_identity,
    'deflate': _stream_compress_deflate,
    'gzip': _stream_compress_gzip
}
def stream_compress(reply, available, compress_level, max_chunk):
    """If compression has been requested via Accept-Encoding request header,
    and is granted for this response via `available` compression methods,
    convert the streaming `reply` into another streaming response which is
    compressed at the exact chunk boundaries of the original response,
    except that individual chunks may be coalesced up to `max_chunk` size.
    The `compress_level` tells how hard to compress, zero disables the
    compression entirely."""
    # _stream_compressor is only read here, so no 'global' statement is needed.
    for enc in cherrypy.request.headers.elements('Accept-Encoding'):
        if enc.value not in available:
            continue
        if enc.value in _stream_compressor and compress_level > 0:
            # The response depends on the encoding the client asked for.
            vary_by('Accept-Encoding')
            # Compress contents at original chunk boundaries.  The length of
            # the compressed stream is unknown up front, so any pre-computed
            # Content-Length no longer applies.
            if 'Content-Length' in cherrypy.response.headers:
                del cherrypy.response.headers['Content-Length']
            cherrypy.response.headers['Content-Encoding'] = enc.value
            return _stream_compressor[enc.value](reply, compress_level, max_chunk)
    return reply
def _etag_match(status, etagval, match, nomatch):
    """Match ETag value against any If-Match / If-None-Match headers.

    Conditions are evaluated only for GET/HEAD requests and 2xx statuses;
    for PUT etc. they must be enforced as request pre-conditions rather
    than during the streaming-out phase handled here."""
    if cherrypy.request.method not in ('GET', 'HEAD'):
        return
    status, dummyReason, dummyMsg = httputil.valid_status(status)
    if status < 200 or status > 299:
        return
    if match and ("*" in match or etagval in match):
        raise cherrypy.HTTPError(412, "Precondition on ETag %s failed" % etagval)
    if nomatch and ("*" in nomatch or etagval in nomatch):
        raise cherrypy.HTTPRedirect([], 304)
def _etag_tail(head, tail, etag):
"""Generator which first returns anything in `head`, then `tail`.
Sets ETag header at the end to value of `etag` if it's defined and
yields a value."""
for chunk in head:
yield encodeUnicodeToBytes(chunk)
for chunk in tail:
yield encodeUnicodeToBytes(chunk)
etagval = (etag and etag.value())
if etagval:
cherrypy.response.headers["ETag"] = etagval
def stream_maybe_etag(size_limit, etag, reply):
    """Maybe generate ETag header for the response, and handle If-Match
    and If-None-Match request headers. Consumes the reply until at most
    `size_limit` bytes. If the response fits into that size, adds the
    ETag header and matches it against any If-Match / If-None-Match
    request headers and replies appropriately.

    If the response is fully buffered, and the `reply` generator actually
    results in an error and sets X-Error-HTTP / X-Error-Detail headers,
    converts that error back into a real HTTP error response. Otherwise
    responds with the fully buffered body directly, without generator
    and chunking. In other words, responses smaller than `size_limit`
    are always fully buffered and replied immediately without chunking.
    If the response is not fully buffered, it's guaranteed to be output
    at original chunk boundaries.

    Note that if this function is fed the output from `stream_compress()`
    as it normally would be, the `size_limit` constrains the compressed
    size, and chunk boundaries correspond to compressed chunks.

    :arg int size_limit: maximum number of bytes buffered before falling
      back to streaming output.
    :arg etag: ETag calculator the upstream generator folds the reply into.
    :arg reply: generator producing the response chunks."""
    req = cherrypy.request
    res = cherrypy.response
    match = [str(x) for x in (req.headers.elements('If-Match') or [])]
    nomatch = [str(x) for x in (req.headers.elements('If-None-Match') or [])]

    # If ETag is already set, match conditions and output without buffering.
    etagval = res.headers.get('ETag', None)
    if etagval:
        _etag_match(res.status or 200, etagval, match, nomatch)
        res.headers['Trailer'] = 'X-REST-Status'
        return _etag_tail([], reply, None)

    # Buffer up to size_limit bytes internally. This internally builds up the
    # ETag value inside 'etag'. In case of exceptions the ETag invalidates.
    # If we exceed the limit, fall back to streaming without checking ETag
    # against If-Match/If-None-Match. We'll still set the ETag in the trailer
    # headers, so clients which understand trailers will get the value; most
    # clients including browsers will ignore them.
    size = 0
    result = []
    for chunk in reply:
        result.append(chunk)
        size += len(chunk)
        if size > size_limit:
            # Too big to buffer: stream the buffered part, then the rest.
            res.headers['Trailer'] = 'X-REST-Status'
            return _etag_tail(result, reply, etag)

    # We've buffered the entire response, but it may be an error reply. The
    # generator code does not know if it's allowed to raise exceptions, so
    # it swallows all errors and converts them into X-* headers. We recover
    # the original HTTP response code and message from X-Error-{HTTP,Detail}
    # headers, if any are present.
    err = res.headers.get('X-Error-HTTP', None)
    if err:
        message = res.headers.get('X-Error-Detail', 'Original error lost')
        raise cherrypy.HTTPError(int(err), message)

    # OK, we buffered the entire reply and it's ok. Check ETag match criteria.
    # The original stream generator must guarantee that if it fails it resets
    # the 'etag' value, even if the error handlers above didn't run.
    etagval = etag.value()
    if etagval:
        res.headers['ETag'] = etagval
        _etag_match(res.status or 200, etagval, match, nomatch)

    # OK, respond with the buffered reply as a plain string.
    res.headers['Content-Length'] = size
    # TODO investigate why `result` is a list of bytes strings in py3
    # The current solution seems to work in both py2 and py3
    resp = b"" if PY3 else ""
    for item in result:
        resp += encodeUnicodeToBytesConditional(item, condition=PY3)
    assert len(resp) == size
return resp | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/WMCore/REST/Format.py | 0.843605 | 0.218909 | Format.py | pypi |
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method='HNLogin',
                       login='testuser', name='Test User',
                       dn="/test/dn", roles=None, format="list"):
    """Create fake authentication and authorisation headers compatible
    with the CMSWEB front-ends. Assumes you have the HMAC signing key
    the back-end will use to validate the headers.

    :arg str hmac_key: binary key data for signing headers.
    :arg str method: authentication method, one of X509Cert, X509Proxy,
      HNLogin, HostIP, AUCookie or None.
    :arg str login: account login name.
    :arg str name: account user name.
    :arg str dn: account X509 subject.
    :arg dict roles: role dictionary, each role with 'site' and 'group' lists.
    :arg str format: 'list' to return (name, value) tuples, anything else
      to return the headers dict itself.
    :returns: list of header name, value tuples to add to a HTTP request."""
    # Avoid the mutable-default-argument pitfall: fresh dict per call.
    roles = roles or {}
    headers = {'cms-auth-status': 'OK', 'cms-authn-method': method}
    if login:
        headers['cms-authn-login'] = login
    if name:
        headers['cms-authn-name'] = name
    if dn:
        headers['cms-authn-dn'] = dn
    # Loop variable renamed so the 'name' argument is not clobbered while
    # expanding the per-role authorization headers.
    for role, spec in viewitems(roles):
        hname = 'cms-authz-' + authz_canonical(role)
        headers[hname] = []
        for r in 'site', 'group':
            if r in spec:
                headers[hname].extend(["%s:%s" % (r, authz_canonical(v)) for v in spec[r]])
        headers[hname] = " ".join(headers[hname])
    # Sign every header except the status with an HMAC-SHA1 checksum, in the
    # "h<len>v<len>...#<key><value>..." format the front-ends verify.
    prefix = suffix = ""
    hkeys = list(headers)
    for hk in sorted(hkeys):
        if hk != 'cms-auth-status':
            prefix += "h%xv%x" % (len(hk), len(headers[hk]))
            suffix += "%s%s" % (hk, headers[hk])
    msg = prefix + "#" + suffix
    if PY3:
        hmac_key = encodeUnicodeToBytes(hmac_key)
        msg = encodeUnicodeToBytes(msg)
    cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
    headers['cms-authn-hmac'] = cksum
    if format == "list":
        return listitems(headers)
    return headers
def fake_authz_key_file(delete=True):
    """Create temporary file for fake authorisation hmac signing key.

    :arg bool delete: forwarded to :class:`~.NamedTemporaryFile`; when True
      the file is removed automatically on close.
    :returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
      attribute contains the HMAC signing binary key."""
    t = NamedTemporaryFile(delete=delete)
    # os.urandom is equivalent to reading /dev/urandom but portable and
    # avoids managing an extra file handle.
    t.data = os.urandom(20)
    t.write(t.data)
    t.seek(0)
    return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
    """Helper function to set up a :class:`~.RESTMain` server from given
    module and class. Creates a fake server configuration and instantiates
    the server application from it.

    :arg str module_name: module from which to import test class.
    :arg str class_name: name of the server test class.
    :arg str app_name: optional test application name, 'test' by default.
    :arg authz_key_file: optional existing authz hmac key file; a fresh
      fake one is created when not given.
    :arg int port: TCP port for the CherryPy server (default 8888).
    :returns: tuple with the server object and authz hmac signing key."""
    if authz_key_file:
        test_authz_key = authz_key_file
    else:
        test_authz_key = fake_authz_key_file()
    # Build a minimal in-memory configuration for the test application.
    cfg = Configuration()
    main = cfg.section_('main')
    main.application = app_name or 'test'
    main.silent = True
    main.index = 'top'
    main.authz_defaults = { 'role': None, 'group': None, 'site': None }
    main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
    app = cfg.section_(app_name or 'test')
    app.admin = 'dada@example.org'
    app.description = app.title = 'Test'
    views = cfg.section_('views')
    top = views.section_('top')
    top.object = module_name + "." + class_name
    server = RESTMain(cfg, os.getcwd())
    server.validate_config()
    server.setup_server()
    server.install_application()
    cherrypy.config.update({'server.socket_port': port})
    cherrypy.config.update({'server.socket_host': '127.0.0.1'})
    cherrypy.config.update({'request.show_tracebacks': True})
    cherrypy.config.update({'environment': 'test_suite'})
    # Make tracebacks visible for every mounted application as well.
    for app in viewvalues(cherrypy.tree.apps):
        if '/' in app.config:
            app.config["/"]["request.show_tracebacks"] = True
return server, test_authz_key | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/WMCore/REST/Test.py | 0.631935 | 0.193147 | Test.py | pypi |
from copy import deepcopy
from WMCore.ReqMgr.DataStructs.RequestStatus import ALLOWED_ACTIONS_FOR_STATUS
class AuthzByStatus:
    """
    Class defining the authorization policy based on the
    target request status.

    This class depends on the ReqMgr2 authorization configuration.
    """

    def __init__(self, authzByStatus, authzRolesGroups):
        """
        Create a request status based data structure, with the role and
        groups passed from the service configuration file.

        :param authzByStatus: a list of dictionary items. Where each dict contains
            a permission group and a list of allowed statuses.
        :param authzRolesGroups: a nested dictionary with CRIC roles and groups
            permissions for each permissions group
        :return: None
        """
        # permission group for updates that do not change the request status
        self.noStatus = None
        # maps each request status to the permission group required to set it
        self.authzByStatus = {}
        self.authzRolesGroups = authzRolesGroups
        # first, map status to required permission
        for item in authzByStatus:
            for status in item["statuses"]:
                if status == "NO_STATUS":
                    self.noStatus = item["permission"]
                else:
                    self.authzByStatus[status] = item["permission"]
        # now validate the authorization configuration
        self._validateAuthzStruct()

    def _validateAuthzStruct(self):
        """
        Runs a basic validation of the authorization data structure
        to catch possible mistakes made in the configuration file.

        :return: raises an exception if problems are found
        """
        if self.noStatus is None:
            msg = "Configuration for authorization by status does not define the required "
            msg += "roles and groups for a non-status change update. Please review it."
            raise RuntimeError(msg)
        expectedPermissions = {"admin", "ops", "ppd"}
        if set(self.authzRolesGroups) != expectedPermissions:
            msg = "Authorized roles and groups do not match the expected group permissions, "
            msg += f"they are: {expectedPermissions}. Please review it."
            raise RuntimeError(msg)
        if set(self.authzByStatus.values()) != expectedPermissions:
            msg = "Configuration for authorization by status does not match the expected "
            msg += f"group permissions, they are: {expectedPermissions}. Please review it."
            raise RuntimeError(msg)
        # every known workflow status must have a permission mapping
        if len(ALLOWED_ACTIONS_FOR_STATUS) != len(self.authzByStatus):
            msg = "Configuration for authorization by status is missing workflow "
            msg += "status(es). Please review it and make sure all statuses are covered."
            raise RuntimeError(msg)

    def getRolesGroupsByStatus(self, requestArgs):
        """
        Given a dictionary with the request arguments being changed by
        the user, decide which roles/groups the user needs to have in
        order to complete the desired action.

        :param requestArgs: dictionary with request arguments posted by the user
        :return: dictionary with the roles and groups required for such action
        """
        targetStatus = requestArgs.get("RequestStatus", None)
        if targetStatus is None:
            # no status change: apply the default (NO_STATUS) permission group
            return deepcopy(self.authzRolesGroups[self.noStatus])
        permissionGroup = self.authzByStatus[targetStatus]
return deepcopy(self.authzRolesGroups[permissionGroup]) | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/WMCore/ReqMgr/DataStructs/AuthzByStatus.py | 0.749454 | 0.354433 | AuthzByStatus.py | pypi |
from __future__ import print_function, division
# Per-campaign (and per-RelVal-batch) configuration knobs: site white/black
# lists, copy counts, completion fractions, custodial destinations, etc.
# NOTE(review): the semantics of individual knobs are consumed elsewhere
# (Unified/ReqMgr tooling) and cannot be verified from this file alone.
CAMPAIGN_CONFIG = \
    {
        "Summer16Geant4102": {
            "go": True,
            "EventsPerLumi": "x10"
        },
        "HIRun2015": {
            "go": True,
            "labels": ["02May2016", "25Aug2016"],
            "overflow": {"PRIM": {}},
            "DDMcopies": {
                "all": {"N": 2}
            },
            "custodial_override": ["DQMIO"],
            "fractionpass": 1.0,
            "lumisize": -1,
            "maxcopies": 1,
            "custodial": "T1_FR_CCIN2P3_MSS",
            "NonCustodialSites": ["T2_US_Vanderbilt"],
            "SiteBlacklist": [
                "T1_US_FNAL",
                "T2_US_Purdue",
                "T2_US_Caltech",
                "T2_US_Florida",
                "T2_US_Nebraska",
                "T2_US_UCSD",
                "T2_US_Wisconsin"
            ]
        },
        "TTI2023Upg14GS": {
            "go": True,
            "EventsPerLumi": 25
        },
        "TTI2023Upg14D": {
            "go": True,
            "SiteWhitelist": ["T1_US_FNAL", "T1_ES_PIC", "T1_FR_CCIN2P3", "T1_IT_CNAF", "T1_RU_JINR", "T1_UK_RAL",
                              "T1_DE_KIT", "T2_US_MIT", "T2_US_Nebraska", "T2_US_Caltech", "T2_CH_CERN"],
            "secondaries": {"/PYTHIA6_MinBias_TuneZ2star_14TeV/TTI2023Upg14GS-FLATBS15_DES23_62_V1-v1/GEN-SIM": {},
                            "/PYTHIA6_MinBias_TuneZ2star_14TeV/TTI2023Upg14GS-DES23_62_V1-v1/GEN-SIM": {},
                            "/MinBias_TuneZ2star_14TeV-pythia6/TTI2023Upg14-DES23_62_V1-v1/GEN-SIM": {},
                            "/PYTHIA6_Tauola_TTbar_TuneZ2star_14TeV/TTI2023Upg14GS-CoolingDefect_DES23_62_V1-v1/GEN-SIM": {}
                            }
        },
        "Run2016B": {
            "go": True,
            "labels": ["03Feb2017", "15Feb2017", "22Feb2017", "18Apr2017_ver2", "18Apr2017"],
            "overflow": {"PRIM": {}},
            "fractionpass": 1.0,
            "lumisize": -1,
            "maxcopies": 1
        },
        "Run2015C_25ns": {
            "go": True,
            "labels": ["24Nov2016", "16Jan2017", "19Jan2017"],
            "DDMcopies": {
                "all": {"N": 4},
                "RECO": {"N": 1, "host": ["T1_US_FNAL_Disk"]}
            },
            "custodial_override": ["DQMIO"],
            "fractionpass": {"all": 1.0, "AOD": 1.0, "MINIAOD": 1.0, "DQMIO": 1.0, "USER": 1.0, "RAW-RECO": 1.0},
            "lumisize": -1,
            "maxcopies": 1,
            "primary_AAA": False,
            "overflow": {"PRIM": {}}
        },
        "PhaseIISpring17D": {
            "go": True,
            "toDDM": ["GEN-SIM-DIGI-RAW"],
            "lumisize": 1500,
            "maxcopies": 1,
            "fractionpass": 0.95,
            "tune": True,
            "SiteBlacklist": ["T1_US_FNAL", "T0_CH_CERN", "T2_CH_CERN_HLT", "T2_CH_CERN"],
            "secondaries": {
                "/MinBias_TuneCUETP8M1_14TeV-pythia8/PhaseIIFall16GS82-90X_upgrade2023_realistic_v1-v1/GEN-SIM": {},
                "/MinBias_TuneCUETP8M1_14TeV-pythia8/PhaseIISpring17GS-90X_upgrade2023_realistic_v9-v1/GEN-SIM": {
                    "SiteWhitelist": ["T1_DE_KIT", "T2_IT_Rome", "T1_UK_RAL", "T2_ES_CIEMAT", "T2_US_Nebraska",
                                      "T2_US_Caltech", "T2_US_Wisconsin"]}}
        },
        # RelVal config
        "CMSSW_9_1_0_pre1__UPSG_VBF_PU200-1492881257": {
            "SiteWhitelist": ["T1_US_FNAL"],
            "MergedLFNBase": "/store/relval",
            "Team": "relval",
            "NonCustodialGroup": "RelVal",
            "maxcopies": 1,
            "custodial": "T1_US_FNAL_MSS",
            "lumisize": -1,
            "phedex_group": "RelVal",
            "fractionpass": 0.0,
            "go": True
        },
        "CMSSW_9_1_0_pre3__fullSimPU_premix-1493915792": {
            "SiteWhitelist": [
                "T1_US_FNAL"
            ],
            "MergedLFNBase": "/store/relval",
            "Team": "relval",
            "NonCustodialGroup": "RelVal",
            "maxcopies": 1,
            "custodial": "T1_US_FNAL_MSS",
            "lumisize": -1,
            "phedex_group": "RelVal",
            "fractionpass": 0.0,
            "go": True
        },
} | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/WMCore/ReqMgr/DataStructs/DefaultConfig/CAMPAIGN_CONFIG.py | 0.498779 | 0.283004 | CAMPAIGN_CONFIG.py | pypi |
from __future__ import print_function, division
UNIFIED_CONFIG = {
"site_for_overflow": {
"value": ["nono_T0_CH_CERN",
"T2_CH_CERN_HLT"],
"description": "The sites that we set to overflow and require a specific treatment"
},
"overflow_pressure": {
"value": 0.5,
"description": "The ratio pending/running over which to consider overflowing"
},
"DDM_buffer_level": {
"value": 0.8,
"description": "The fraction of the DDM quota we are allowed to use"
},
"sites_banned": {
"value": ["T2_CH_CERN_AI",
"NONO_T0_CH_CERN",
"T2_TH_CUNSTDA",
"NONO_T2_US_Vanderbilt",
"T2_EE_Estonia",
"T2_UA_KIPT"
],
"description": "The sites that are banned from production"
},
"sites_auto_approve": {
"value": ["T0_CH_CERN_MSS", "T1_FR_CCIN2P3_MSS"],
"description": "The sites we can autoapprove tape request to"
},
"sites_space_override": {
"value": {
},
"description": "Over-ride the available space at a phedex end point"
},
"sites_with_goodIO": {
"value": ["T2_DE_DESY", "T2_DE_RWTH", "T2_ES_CIEMAT", "T2_FR_GRIF_LLR", "T2_FR_GRIF_IRFU", "T2_FR_IPHC",
"T2_FR_CCIN2P3", "T2_IT_Bari", "T2_IT_Legnaro", "T2_IT_Pisa", "T2_IT_Rome", "T2_UK_London_Brunel",
"T2_UK_London_IC", "T2_US_Caltech", "T2_US_MIT", "T2_US_Nebraska", "T2_US_Purdue", "T2_US_UCSD",
"T2_US_Wisconsin", "T2_US_Florida", "T2_BE_IIHE", "T2_EE_Estonia", "T2_PL_Swierk", "T2_CH_CERN",
"T2_CH_CERN_HLT"],
"description": "The sites identified as having good storage badnwidth"
},
"max_cpuh_block": {
"value": 40000000,
"description": "Value of CPUh above which a wf is blocked from assigning"
},
"block_repositionning": {
"value": True,
"description": "Whether or not to retransfer block from WQE without location"
},
"allowed_bypass": {
"description": "Who is allowed to bypass and force complete",
"value": [["vlimant", "vlimant@cern.ch"],
["prozober", "paola.katherine.rozo.bernal@cern.ch"],
["mcremone", "matteoc@fnal.gov"]]
},
"max_tail_priority": {
"value": 5,
"description": "Number of workflow to increase the priority of at a time"
},
"injection_delay_threshold": {
"value": 50,
"description": "Number of days after wich to increase the priority of a workflow"
},
"delay_priority_increase": {
"value": 10000,
"description": "Priority from original increase per week over the delay threshold"
},
"injection_delay_priority": {
"value": 75000,
"description": "Priority above which we can increase the priority of a workflow after running too long"
},
"max_force_complete": {
"value": 10,
"description": "Number of workflow that can be forced complete at a time"
},
"max_per_round": {
"description": "limitation on the number of wf to process per module",
"value": {
"transferor": None,
"assignor": None,
"closor": None,
"checkor": None,
"completor": None
}
},
"default_fraction_pass": {
"value": 1.0,
"description": "completion fraction above which to announce dataset"
},
"pattern_fraction_pass": {
"value": {},
"description": "overide of the completion fraction of dataset with keyword"
},
"tiers_with_no_custodial": {
"value": ["DQM", "DQMIO", "RECO", "RAWAODSIM"],
"description": "The data tiers that do not go to tape. Can be overidden by custodial overide at campaign level"
},
"use_parent_custodial": {
"value": False,
"description": "Use the location of the parent dataset for custodial copy"
},
"tape_size_limit": {
"value": 200,
"description": "Size over which to prevent transfer to tape automatically"
},
"tiers_with_no_check": {
"value": ["DQM", "DQMIO"],
"description": "The data tiers that do not pass closeout checks. Can be overidden by custodial overide at campaign level"
},
"tiers_no_DDM": {
"value": ["GEN-SIM", "LHE", "GEN", "DQM", "DQMIO", "GEN-SIM-DIGI-RAW", "RAW"],
"description": "The data tiers that do not go to AnaOps"
},
"tiers_to_DDM": {
"value": ["AODSIM", "MINIAODSIM", "GEN-SIM-RAW", "GEN-SIM-RECO", "GEN-SIM-RECODEBUG", "AOD", "RECO", "MINIAOD",
"ALCARECO", "USER", "RAW-RECO", "RAWAODSIM"],
"description": "The data tiers that go to AnaOps"
},
"tiers_keep_on_disk": {
"value": ["LHE"],
"description": "the data tier not unlocked until used again"
},
"check_fullcopy_to_announce": {
"value": False,
"description": "Whether to check for a full copy being present prior to announcing a dataset"
},
"stagor_sends_back": {
"value": True,
"description": "Whether the stagor module can send workflow back to considered"
},
"max_handled_workflows": {
"value": 4000,
"description": "The total number of workflows that we allow to handle at a time (transfer, running, assistance)"
},
"max_staging_workflows": {
"value": 700,
"description": "The total number of workflows that we allow to stage at a time"
},
"max_staging_workflows_per_site": {
"value": 700,
"description": "The total number of workflows that we allow to stage at a time per site"
},
"max_transfer_in_GB": {
"value": 800000,
"description": "The total size of the input datasets that can be transfered at a given time"
},
"transfer_timeout": {
"value": 7,
"description": "Time in days after which to consider a transfer to be stuck"
},
"transfer_lowrate": {
"value": 0.004,
"description": "Rate in GB/s under which to consider a transfer to be stuck, after transfer_timeout days"
},
"less_copies_than_requested": {
"value": 1,
"description": "Decrease the number of requested copies by that number, floored to 1"
},
"chopping_threshold_in_GB": {
"value": 4000,
"description": "The threshold before choping an input dataset in chunk of that size for spreading to sites"
},
"error_codes_to_recover": {
"value": {"50664": [{"legend": "time-out",
"solution": "split-2",
"details": None,
"rate": 20
}],
"50660": [{"legend": "memory excess",
"solution": "mem-1000",
"details": None,
"rate": 20
}],
"61104": [{"legend": "failed submit",
"solution": "recover",
"details": None,
"rate": 20
}],
"8028": [{"legend": "read error",
"solution": "recover",
"details": None,
"rate": 20
}],
"8021": [{"legend": "cmssw failure",
"solution": "recover",
"details": "FileReadError",
"rate": 20
}],
"71305": [{"legend": "long pending",
"solution": "recover",
"details": None,
"rate": 20
}],
"8001": [{"legend": "lhe failure",
"solution": "split-4",
"details": "No lhe event found in ExternalLHEProducer::produce()",
"rate": 20
}]
},
"description": "The error code, threshold and rules for auto-recovery"
},
"error_codes_to_block": {
"value":
{
"99109": [{"legend": "stage-out",
"solution": "recover",
"details": None,
"rate": 20
}]
},
"description": "The error code, threshold and rules to prevent auto-recovery"
},
"error_codes_to_notify": {
"value": {
"8021": {"message": "Please take a look and come back to Ops."}
},
"description": "The error code, threshold and rules to notify the user of an error in production"
},
"user_rereco": {
"description": "The users from which we expect ReReco requests",
"value": ["cerminar", "fabozzi"]
},
"user_relval": {
"description": "The users from which we expect relval requests",
"value": ["fabozzi", "nwickram", "bsutar", "rverma", "prebello", "piperov", "sandhya"]
},
"relval_routing": {
"description": "Set of keywords and special settings for relvals",
"value": {"cc7": {"parameters": {"SiteWhitelist": ["T2_US_Nebraska"]}},
"highIOtobedecidedflag": {"parameters": {"SiteWhitelist": ["T2_US_Nebraska", "T2_US_Purdue"]}}
}
},
"batch_goodness": {
"description": "Level below which to include a note in the batch report",
"value": 90
}
} | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/WMCore/ReqMgr/DataStructs/DefaultConfig/UNIFIED_CONFIG.py | 0.734786 | 0.446012 | UNIFIED_CONFIG.py | pypi |
import re
from WMCore.REST.Server import RESTFrontPage
# Path to static resources: one or more directory components followed by a
# [-a-z0-9_] file name with an allowed web-asset extension.
RX_STATIC_DIR_PATH = re.compile(r"^([a-zA-Z]+/)+[-a-z0-9_]+\.(?:css|js|png|gif|html)$")
class FrontPage(RESTFrontPage):
    """WMStats front page: maps URL roots to the static UI resources
    (HTML, JS, CSS, images, bundled libs and fonts)."""

    def __init__(self, app, config, mount):
        """
        :arg app: reference to the application object.
        :arg config: reference to the configuration.
        :arg str mount: URL mount point.
        """
        # must be in a static content directory
        frontpage = "html/WMStats/index.html"
        # Each root maps a URL prefix to a filesystem directory plus the
        # regexp that requested paths must match to be served.
        roots = \
            {
                "html":
                    {
                        # without repeating the 'html' here, it doesn't work
                        # due to path gymnastics in WMCore.REST.Server.py
                        # rather messy figuring out static content dir by
                        # counting file separators and making it compatible
                        # between localhost and VM running, hence the config
                        # value here
                        "root": "%s/html/" % config.static_content_dir,
                        "rx": RX_STATIC_DIR_PATH
                    },
                "js":
                    {"root": "%s/html/WMStats/js/" % config.static_content_dir,
                     "rx": re.compile(r"^([a-zA-Z]+/)+[-a-z0-9_]+\.(?:js)$")
                     },
                "css":
                    {"root": "%s/html/WMStats/css/" % config.static_content_dir,
                     "rx": re.compile(r"^([a-zA-Z]+/)+[-a-z0-9_]+\.(?:css)$")
                     },
                "images":
                    {"root": "%s/html/WMStats/images/" % config.static_content_dir,
                     "rx": re.compile(r"^([a-zA-Z]+/)+[-a-z0-9_]+\.(?:png|gif)$")
                     },
                "lib":
                    {"root": "%s/html/WMStats/lib/" % config.static_content_dir,
                     "rx": RX_STATIC_DIR_PATH
                     },
                "fonts":
                    {"root": "%s/html/WMStats/fonts/" % config.static_content_dir,
                     "rx": RX_STATIC_DIR_PATH
                     }
            }
RESTFrontPage.__init__(self, app, config, mount, frontpage, roots) | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/WMCore/WMStats/WebGui/FrontPage.py | 0.411584 | 0.156169 | FrontPage.py | pypi |
from __future__ import (division, print_function)
from WMCore.REST.Server import RESTEntity, restcall, rows
from WMCore.REST.Tools import tools
from WMCore.REST.Error import DataCacheEmpty
from WMCore.WMStats.DataStructs.DataCache import DataCache
from WMCore.REST.Format import JSONFormat, PrettyJSONFormat
from WMCore.ReqMgr.DataStructs.RequestStatus import ACTIVE_STATUS_FILTER
class ActiveRequestJobInfo(RESTEntity):
    """
    REST entity returning all the active requests with job information
    attached.
    """

    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)

    def validate(self, apiobj, method, api, param, safe):
        # No arguments accepted; nothing to validate.
        return

    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self):
        # This assumes DataCache is periodically updated.
        # If data is not updated, need to check dataCacheUpdate log.
        return rows([DataCache.getlatestJobData()])
class FilteredActiveRequestJobInfo(RESTEntity):
    """
    REST entity returning the active requests (with job information
    attached) that match the filter conditions given as query arguments.
    """

    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)

    def validate(self, apiobj, method, api, param, safe):
        # Accept every supplied keyword argument as "safe" (they become the
        # filter conditions), then remove them from the raw parameter set so
        # the REST machinery treats them all as validated.
        for prop in param.kwargs:
            safe.kwargs[prop] = param.kwargs[prop]
        for prop in safe.kwargs:
            del param.kwargs[prop]
        return

    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self, mask=None, **input_condition):
        # This assumes DataCache is periodically updated.
        # If data is not updated, need to check dataCacheUpdate log.
        return rows(DataCache.filterDataByRequest(input_condition, mask))
class ProtectedLFNList(RESTEntity):
    """
    API which provides a list of ALL possible unmerged LFN bases (including
    transient datasets/LFNs).
    """
    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)
    def validate(self, apiobj, method, api, param, safe):
        # This API takes no arguments, so there is nothing to validate.
        return
    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self):
        # This assumes DataCache is periodically updated.
        # If data is not updated, check the dataCacheUpdate log.
        # NOTE: unlike some sibling APIs, an empty cache raises
        # DataCacheEmpty here instead of returning an empty result.
        if DataCache.isEmpty():
            raise DataCacheEmpty()
        else:
            return rows(DataCache.filterData(ACTIVE_STATUS_FILTER, ["OutputModulesLFNBases"]))
class ProtectedLFNListOnlyFinalOutput(RESTEntity):
    """
    Same as ProtectedLFNList API, however this one only provides LFNs that are not
    transient, so only final output LFNs.
    """
    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)
    def validate(self, apiobj, method, api, param, safe):
        # This API takes no arguments, so there is nothing to validate.
        return
    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self):
        # This assumes DataCache is periodically updated.
        # If data is not updated, check the dataCacheUpdate log.
        # NOTE: no DataCache.isEmpty() guard here, unlike ProtectedLFNList.
        return rows(DataCache.getProtectedLFNs())
class GlobalLockList(RESTEntity):
    """
    REST entity listing the dataset names referenced by active requests
    (input, output and MC/data pileup datasets). Judging by the class
    name, these presumably constitute the globally locked data --
    confirm with the callers of this API.
    """
    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)
    def validate(self, apiobj, method, api, param, safe):
        # This API takes no arguments, so there is nothing to validate.
        return
    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self):
        # This assumes DataCache is periodically updated.
        # If data is not updated, check the dataCacheUpdate log.
        if DataCache.isEmpty():
            raise DataCacheEmpty()
        else:
            return rows(DataCache.filterData(ACTIVE_STATUS_FILTER,
                                             ["InputDataset", "OutputDatasets", "MCPileup", "DataPileup"]))
from builtins import range
from WMCore.DataStructs.Run import Run
class Mask(dict):
    """
    _Mask_

    Dict-based mask restricting a unit of work to given run, lumi and
    event ranges. An empty 'runAndLumis' mapping means the mask places
    no run/lumi restriction at all.
    """

    def __init__(self, **kwargs):
        dict.__init__(self, **kwargs)
        self.inclusive = True
        self.setdefault("inclusivemask", True)
        # Range boundaries default to None, i.e. "unbounded"
        for boundary in ("FirstEvent", "LastEvent", "FirstLumi", "LastLumi",
                         "FirstRun", "LastRun"):
            self.setdefault(boundary, None)
        self.setdefault("runAndLumis", {})

    def setMaxAndSkipEvents(self, maxEvents, skipEvents):
        """
        _setMaxAndSkipEvents_

        Express a max/skip pair as an event range: FirstEvent is the skip
        count and LastEvent (set only when maxEvents is given) is
        skipEvents + maxEvents.
        """
        self['FirstEvent'] = skipEvents
        if maxEvents is not None:
            self['LastEvent'] = skipEvents + maxEvents

    def setMaxAndSkipLumis(self, maxLumis, skipLumi):
        """
        _setMaxAndSkipLumis_

        Express a max/skip pair as a lumi section range.
        """
        self['FirstLumi'] = skipLumi
        self['LastLumi'] = skipLumi + maxLumis

    def setMaxAndSkipRuns(self, maxRuns, skipRun):
        """
        _setMaxAndSkipRuns_

        Express a max/skip pair as a run range.
        """
        self['FirstRun'] = skipRun
        self['LastRun'] = skipRun + maxRuns

    def getMaxEvents(self):
        """
        _getMaxEvents_

        Size of the configured event range (inclusive on both ends), or
        None when the range is open-ended.
        """
        if self['FirstEvent'] is None or self['LastEvent'] is None:
            return None
        return self['LastEvent'] - self['FirstEvent'] + 1

    def getMax(self, keyType=None):
        """
        _getMax_

        Size of the configured range for the given key type ('Event',
        'Lumi' or 'Run'), or None when unknown or open-ended.
        """
        firstKey = 'First%s' % keyType
        lastKey = 'Last%s' % keyType
        if firstKey not in self:
            return None
        if self[firstKey] is None or self[lastKey] is None:
            return None
        return self[lastKey] - self[firstKey] + 1

    def addRun(self, run):
        """
        _addRun_

        Register a Run object in the mask, collapsing its lumi list into
        contiguous [first, last] pairs. NOTE: sorts run.lumis in place.
        """
        run.lumis.sort()
        rangeStart = rangeEnd = run.lumis[0]
        for lumi in run.lumis:
            if lumi > rangeEnd + 1:
                # gap detected: flush the contiguous range collected so far
                self.addRunAndLumis(run.run, lumis=[rangeStart, rangeEnd])
                rangeStart = lumi
            rangeEnd = lumi
        self.addRunAndLumis(run.run, lumis=[rangeStart, rangeEnd])

    def addRunWithLumiRanges(self, run, lumiList):
        """
        _addRunWithLumiRanges_

        Set the lumi ranges for a run directly, with call signature
        addRunWithLumiRanges(run=run, lumiList=[[start1, end1], [start2, end2], ...])
        """
        self['runAndLumis'][run] = lumiList

    def addRunAndLumis(self, run, lumis=None):
        """
        _addRunAndLumis_

        Append the range [min(lumis), max(lumis)] to the given run.

        TODO: The name is a little misleading: only the extremes of the
              list are used, so missing lumis inside the list are ignored.
        NOTE: Overlaps with pre-existing lumi ranges are not merged, which
              can leave duplicate lumis in the mask.
        """
        lumis = lumis or []
        if not isinstance(lumis, list):
            lumis = list(lumis)
        self['runAndLumis'].setdefault(run, []).append([min(lumis), max(lumis)])

    def getRunAndLumis(self):
        """
        _getRunAndLumis_

        Return the mapping of run number -> list of [first, last] lumi pairs.
        """
        return self['runAndLumis']

    def runLumiInMask(self, run, lumi):
        """
        _runLumiInMask_

        Tell whether a given (run, lumi) pair is covered by the mask.
        An empty mask covers everything.
        """
        maskRuns = self['runAndLumis']
        if not maskRuns:
            return True
        if run not in maskRuns:
            return False
        return any(first <= lumi <= last for first, last in maskRuns[run])

    def filterRunLumisByMask(self, runs):
        """
        _filterRunLumisByMask_

        Given an iterable of Run objects, return the set of Run objects
        reduced to the run/lumis allowed by this mask. An empty mask
        returns the input untouched.
        """
        if not self['runAndLumis']:
            return runs
        runsByNumber = {}
        for runObj in runs:
            if runObj.run in runsByNumber:
                runsByNumber[runObj.run].extendLumis(runObj.lumis)
            else:
                runsByNumber[runObj.run] = runObj
        allowedRuns = set(self['runAndLumis']) & set(runsByNumber)
        filtered = set()
        for runNumber in allowedRuns:
            allowedLumis = set()
            for first, last in self['runAndLumis'][runNumber]:
                allowedLumis.update(range(first, last + 1))
            keptLumis = set(runsByNumber[runNumber].lumis) & allowedLumis
            if keptLumis:
                lumiEvents = [(lumi, runsByNumber[runNumber].getEventsByLumi(lumi))
                              for lumi in keptLumis]
                filtered.add(Run(runNumber, *lumiEvents))
        return filtered
from __future__ import absolute_import, division, print_function
from future.utils import listitems
import sys
import hashlib
import time
from functools import total_ordering
from Utils.Utilities import encodeUnicodeToBytes
from WMCore.DataStructs.WMObject import WMObject
@total_ordering
class WorkUnit(WMObject, dict):
    """
    _WorkUnit_

    Data object that contains details for a single work unit
    corresponding to tables workunit and frl_workunit_assoc
    """

    # Keys serialized verbatim by json()
    fieldsToCopy = ['taskid', 'retry_count', 'last_unit_count', 'last_submit_time', 'status', 'firstevent',
                    'lastevent', 'fileid']
    # Keys reported by getInfo(): everything above plus the run/lumi object
    fieldsForInfo = fieldsToCopy + ['run_lumi']

    def __init__(self, taskID=None, retryCount=0, lastUnitCount=None, lastSubmitTime=None,
                 status=0, firstEvent=1, lastEvent=sys.maxsize, fileid=None, runLumi=None):
        """
        Populate the dict with the work unit fields.

        :param taskID: id of the task this work unit belongs to
        :param retryCount: number of retries already performed
        :param lastUnitCount: unit count recorded at the last submission
        :param lastSubmitTime: epoch seconds of the last submission; defaults
                               to the current time when not provided
        :param status: numeric status code
        :param firstEvent: first event covered by this work unit
        :param lastEvent: last event covered by this work unit
        :param fileid: id of the associated file
        :param runLumi: Run object holding the run number and lumi list
        """
        # NOTE(review): passing self to the parent constructor looks odd
        # (WMObject presumably stores it as its config) -- kept as-is.
        super(WorkUnit, self).__init__(self)

        # Fix: the old signature used lastSubmitTime=int(time.time()), which
        # is evaluated once at import time and froze the default timestamp.
        if lastSubmitTime is None:
            lastSubmitTime = int(time.time())

        self.setdefault('taskid', taskID)
        self.setdefault('retry_count', retryCount)
        self.setdefault('last_unit_count', lastUnitCount)
        self.setdefault('last_submit_time', lastSubmitTime)
        self.setdefault('status', status)
        self.setdefault('firstevent', firstEvent)
        self.setdefault('lastevent', lastEvent)
        self.setdefault('fileid', fileid)
        self.setdefault('run_lumi', runLumi)

    def __lt__(self, rhs):
        """
        Order work units by task id, then run, lumi list, first event, last event.
        """
        if self['taskid'] != rhs['taskid']:
            return self['taskid'] < rhs['taskid']
        if self['run_lumi'].run != rhs['run_lumi'].run:
            return self['run_lumi'].run < rhs['run_lumi'].run
        if self['run_lumi'].lumis != rhs['run_lumi'].lumis:
            return self['run_lumi'].lumis < rhs['run_lumi'].lumis
        # Fix: these keys are 'firstevent'/'lastevent'; the previously used
        # 'first_event'/'last_event' do not exist and raised KeyError.
        if self['firstevent'] != rhs['firstevent']:
            return self['firstevent'] < rhs['firstevent']
        return self['lastevent'] < rhs['lastevent']

    def __eq__(self, rhs):
        """
        Work units are equal when task, run, lumis and event range all match.
        """
        # Fix: the run/lumi fields were compared against self instead of rhs,
        # which made the run/lumi part of the comparison always true.
        return (self['taskid'] == rhs['taskid'] and self['run_lumi'].run == rhs['run_lumi'].run and
                self['run_lumi'].lumis == rhs['run_lumi'].lumis and self['firstevent'] == rhs['firstevent'] and
                self['lastevent'] == rhs['lastevent'])

    def __hash__(self):
        """
        Hash function for this dict.
        """
        # Generate an immutable sorted string representing this object
        # NOTE: the run object needs to be hashed
        immutableSelf = []
        for keyName in sorted(self):
            if keyName == "run_lumi":
                immutableSelf.append((keyName, hash(self[keyName])))
            else:
                immutableSelf.append((keyName, self[keyName]))
        hashValue = hashlib.sha1(encodeUnicodeToBytes(str(immutableSelf)))
        # Truncate to 15 hex digits so the result fits a 64-bit integer
        return int(hashValue.hexdigest()[:15], 16)

    def json(self, thunker=None):
        """
        _json_

        Serialize the object. Only copy select fields and construct one new field.
        """
        jsonDict = {k: self[k] for k in WorkUnit.fieldsToCopy}
        jsonDict["run_lumi"] = {"run_number": self['run_lumi'].run, "lumis": self['run_lumi'].lumis}
        return jsonDict

    def __to_json__(self, thunker=None):
        """
        __to_json__

        This is the standard way we jsonize other objects.
        Included here so we have a uniform method.
        """
        return self.json(thunker)

    def getInfo(self):
        """
        Returns: tuple of parameters for the work unit
        """
        return tuple(self[x] for x in WorkUnit.fieldsForInfo)
from builtins import str, bytes
__all__ = []
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WMObject import WMObject
class File(WMObject, dict):
    """
    _File_

    Data object that contains details for a single file

    TODO
     - use the decorator `from functools import total_ordering` after
       dropping support for python 2.6
     - then, drop __ne__, __le__, __gt__, __ge__
    """

    def __init__(self, lfn="", size=0, events=0, checksums=None,
                 parents=None, locations=None, merged=False):
        """
        :param lfn: logical file name, also the identity used by
                    equality/ordering/hashing
        :param size: file size in bytes
        :param events: number of events contained in the file
        :param checksums: dict of checksum label -> value
        :param parents: set of parent files (File objects or LFN strings)
        :param locations: set of PNNs hosting the file
        :param merged: True when the file is the product of a merge step
        """
        dict.__init__(self)
        checksums = checksums or {}
        self.setdefault("lfn", lfn)
        self.setdefault("size", size)
        self.setdefault("events", events)
        self.setdefault("checksums", checksums)
        self.setdefault('runs', set())
        self.setdefault('merged', merged)
        self.setdefault('last_event', 0)
        self.setdefault('first_event', 0)
        if locations is None:
            self.setdefault("locations", set())
        else:
            self.setdefault("locations", locations)
        if parents is None:
            self.setdefault("parents", set())
        else:
            self.setdefault("parents", parents)

    def addRun(self, run):
        """
        _addRun_

        run should be an instance of WMCore.DataStructs.Run

        Add a run container to this file, tweak the run and lumi
        keys to be max run and max lumi for backwards compat.
        """
        if not isinstance(run, Run):
            msg = "addRun argument must be of type WMCore.DataStructs.Run"
            raise RuntimeError(msg)
        addFlag = False
        for runMember in self['runs']:
            if runMember.run == run.run:
                # this relies on the Run object overriding __add__ to merge
                # the incoming lumis into the existing member in place
                runMember + run
                addFlag = True
        if not addFlag:
            self['runs'].add(run)
        return

    def load(self):
        """
        A DataStructs file has nothing to load from, other implementations will
        over-ride this method.
        """
        # Fix: use .get() since "id" is not among the keys set in __init__
        # (json() below also treats it as optional); a plain self['id']
        # raised KeyError whenever no id had been assigned.
        if self.get('id'):
            self['lfn'] = '/store/testing/%s' % self['id']

    def save(self):
        """
        A DataStructs file has nothing to save to, other implementations will
        over-ride this method.
        """
        pass

    def setLocation(self, pnn):
        """
        Add one or more PNNs to the file's locations.
        """
        # Make sure we don't add None, [], "" as file location
        if pnn:
            self['locations'] = self['locations'] | set(self.makelist(pnn))

    def __eq__(self, rhs):
        """
        File is equal if it has the same name
        """
        eq = False
        if isinstance(rhs, type(self)):
            eq = self['lfn'] == rhs['lfn']
        elif isinstance(rhs, (str, bytes)):
            eq = self['lfn'] == rhs
        return eq

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def __hash__(self):
        # Identity is the LFN alone, consistent with __eq__
        thisHash = self['lfn'].__hash__()
        return thisHash

    def __lt__(self, rhs):
        """
        Sort files based on lexicographical ordering of the value connected
        to the 'lfn' key
        """
        eq = False
        if isinstance(rhs, type(self)):
            eq = self['lfn'] < rhs['lfn']
        elif isinstance(rhs, (str, bytes)):
            eq = self['lfn'] < rhs
        return eq

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        return not self.__le__(other)

    def __ge__(self, other):
        return not self.__lt__(other)

    def json(self, thunker=None):
        """
        _json_

        Serialize the file object. This will convert all Sets() to lists and
        weed out the internal data structures that don't need to be shared.
        """
        fileDict = {"last_event": self["last_event"],
                    "first_event": self["first_event"],
                    "lfn": self["lfn"],
                    "locations": list(self["locations"]),
                    "id": self.get("id", None),
                    "checksums": self["checksums"],
                    "events": self["events"],
                    "merged": self["merged"],
                    "size": self["size"],
                    "runs": [],
                    "parents": []}
        for parent in self["parents"]:
            if isinstance(parent, (str, bytes)):
                # Then for some reason, we're passing strings
                # Done specifically for ErrorHandler
                fileDict['parents'].append(parent)
            elif thunker is None:
                # Cannot serialize a parent File object without a thunker
                continue
            else:
                fileDict["parents"].append(thunker._thunk(parent))
        for run in self["runs"]:
            runDict = {"run_number": run.run,
                       "lumis": run.lumis}
            fileDict["runs"].append(runDict)
        return fileDict

    def __to_json__(self, thunker=None):
        """
        __to_json__

        This is the standard way we jsonize other objects.
        Included here so we have a uniform method.
        """
        return self.json(thunker)
import logging
import sys
from collections import Counter
from WMCore.Services.DBS.DBS3Reader import DBS3Reader
from WMCore.Services.Rucio.Rucio import Rucio
RUCIO_ACCT = "wma_prod"
RUCIO_HOST = "http://cms-rucio.cern.ch"
RUCIO_AUTH = "https://cms-rucio-auth.cern.ch"
DBS_URL = "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
def loggerSetup(logLevel=logging.INFO):
    """
    Create and return a logger that streams every record to stdout.

    :param logLevel: logging level applied to both the logger and its handler
    :return: configured logging.Logger instance
    """
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(module)s: %(message)s"))
    handler.setLevel(logLevel)
    logger = logging.getLogger(__name__)
    logger.addHandler(handler)
    logger.setLevel(logLevel)
    return logger
def getFromRucio(dataset, logger):
    """
    Fetch all blocks of a Rucio container together with their file counts.

    :param dataset: container (dataset) name
    :param logger: logger object handed to the Rucio client
    :return: dict mapping block name to its number of files
    """
    rucioClient = Rucio(acct=RUCIO_ACCT,
                        hostUrl=RUCIO_HOST,
                        authUrl=RUCIO_AUTH,
                        configDict={'logger': logger})
    blockInfo = {}
    for blockName in rucioClient.getBlocksInContainer(dataset):
        didInfo = rucioClient.getDID(blockName)
        # setdefault keeps the first count seen for a block name
        blockInfo.setdefault(blockName, didInfo['length'])
    return blockInfo
def getFromDBS(dataset, logger):
    """
    Fetch all blocks of a dataset from DBS with per-block file validity counts.

    :param dataset: dataset name
    :param logger: logger object handed to the DBS reader
    :return: tuple of (dict mapping block name to a Counter with 'valid' and
             'invalid' file counts, Counter with the dataset-wide totals)
    """
    dbsReader = DBS3Reader(DBS_URL, logger)
    blockCounters = {}
    totalCounter = Counter({'valid': 0, 'invalid': 0})
    for blockName in dbsReader.listFileBlocks(dataset):
        fileRecords = dbsReader.dbs.listFileArray(block_name=blockName, validFileOnly=0, detail=True)
        blockCounters.setdefault(blockName, Counter({'valid': 0, 'invalid': 0}))
        for fileRecord in fileRecords:
            status = 'valid' if fileRecord['is_file_valid'] == 1 else 'invalid'
            blockCounters[blockName][status] += 1
            totalCounter[status] += 1
    return blockCounters, totalCounter
def main():
    """
    Compare block content between Rucio and DBS for one dataset.

    Expects the dataset name as the only command line argument, fetches the
    block/file information from both services and logs any discrepancy.
    """
    if len(sys.argv) != 2:
        print("A dataset name must be provided in the command line")
        sys.exit(1)
    datasetName = sys.argv[1]
    logger = loggerSetup(logging.INFO)

    rucioBlocks = getFromRucio(datasetName, logger)
    dbsBlocks, dbsTotals = getFromDBS(datasetName, logger)

    logger.info("*** Dataset: %s", datasetName)
    logger.info("Rucio file count : %s", sum(rucioBlocks.values()))
    logger.info("DBS file count : %s", dbsTotals['valid'] + dbsTotals['invalid'])
    logger.info(" - valid files : %s", dbsTotals['valid'])
    logger.info(" - invalid files : %s", dbsTotals['invalid'])
    logger.info("Blocks in Rucio but not in DBS: %s", set(rucioBlocks) - set(dbsBlocks))
    logger.info("Blocks in DBS but not in Rucio: %s", set(dbsBlocks) - set(rucioBlocks))

    for blockName, rucioCount in rucioBlocks.items():
        if blockName not in dbsBlocks:
            logger.error("This block does not exist in DBS: %s", blockName)
            continue
        dbsCount = sum(dbsBlocks[blockName].values())
        if rucioCount != dbsCount:
            logger.warning("Block with file mismatch: %s", blockName)
            logger.warning("\tRucio: %s\t\tDBS: %s", rucioCount, dbsCount)
if __name__ == "__main__":
sys.exit(main()) | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/bin/adhoc-scripts/checkDsetFileCount.py | 0.413477 | 0.306037 | checkDsetFileCount.py | pypi |
# Reqqie - text based requirement management tool
## Overview
Reqqie is a text based tool to manage requirements. It does so by parsing text files, rewriting them and output reports.
Future feature is to use git to automatically check versions/changes to requirements.
The input files uses the tdbase format.
## Getting started
1. Install reqqie: "pip install reqqie"
2. Create a file "test1.req" with the following content:
```
.config:
top_requirement -> R1
.req R1:
descr: This describes requirement R1
depends:
-> R2
.req R2:
descr: This describes requirement R2
```
3. Run reqqie: "reqqie test1.req". Note that if <python-path>/Scripts are not in your path, you will have to use the full path to reqqie.
## Concepts:
Reqqie uses tdbase to read a database of different records.
Record types are defined in the "input file format" section.
The most important one is naturally a requirement (req).
The idea is that all requirements are connected in a Directed Acyclic Graph (DAG). In addition:
* all requirements should be connected through exactly one DAG. Ie, floating/free requirements are not allowed.
* There should be exactly one top requirement. All other requirements should be connected from "higher level" requirement(s).
A simple example:
* R1 - top requirement. Depends on R2,R3,R4
* R2 - Leaf requirement (does not depend on other requirements)
* R3 - middle requirement (not top, but depends on other requirement(s)). Depends on R4
* R4 - Leaf requirement.
This can be shown as (note that R4 appears twice, but is actually the same requirement):
```
R1
R2
R3
R4
R4
```
Another way to show this is:
```
R1
|
+-+-+--+
| | |
R2 R3 |
| |
R4
```
Depending on the position in the DAG, every requirement get a numeric level. The top level requirement is always 1, and the others are "max(level of requirements that depends on it)+1". Ie:
* R1, numeric lvl 1
* R2, numeric lvl 2
* R3, numeric lvl 2
* R4, numeric lvl 3
This numeric level is calculated by reqqie, and is an indication of how low level/high level a requirement is.
In addition to the numeric levels, there can be "main levels", defined by the "main_levels" attribute in the config file. The main level is always manually given to each requirement. However, Reqqie will ensure that no requirement from a lower level will depend on a requirement from a higher level. It is OK to depend on requirements in the same level though (but the numeric/automatic/fine grain levels assure that the graph is a DAG).
A simple example of main levels are:
```
main_levels:
- system
- design
- module
```
This is a quite common distinction in simple projects. Ie, some requirements are on the "system" level, defining things on the overall system level, often more from usage point of view. More detailed requirements are on the "design" level, defining more from the design point of view. Lowest level in this simple model defines requirements on the module level.
Besides the levels, requirement can have other metadata, further helping the understanding of the complete requirement set:
* categories. Categories are records in a parallell DAG. They define more what a requirement is about. For instance, categories for a SW product could be:
- System - top level category.
- Doc - documentation
- SW - software category
SW could be further divided in PC, Frontend, backend etc.
* tags. There are just free text tags.
* release. For project management, requirements can be attached to different releases, and then in reporting, requirement fulfillment per release can be tracked.
* stage: Every requirement goes through different stages. These are propagated up in the requirement DAG. See separate section regarding stages.
## Input file format
Input files are tdbase formatted. The record types supported are:
* .config - singleton record defining structure
* .category - tree structured data used to categorize requirement records
* .release - used to divide requirements into releases
* .req - an actual requirement
### .config
Singleton record which defines structure etc for the requirements
Attributes:
* paths - list of paths (strs) where external files are checked.
* tags - list of strings. These are possible tags to further define what a requirement is. Completely optional.
* top_category - pointer to the top level category record
* top_requirement - pointer to the top level/main requirement
* main_levels - list of strings which gives names to the main levels. There are sub levels automatically assigned, so no requirement ever depends on a requirement at the same or higher level.
### .category
Every requirement should belong to a category. From a definition point of view, this normally reflects how broad or detailed a requirement is. There are some similarities with "level", but those are two different things.
### .release
For project management, it is possible to tag requirements with different releases. Ie, a first release can normally not fulfill all requirements, but by tagging future requirements with later releases, it is possible to get status reports regarding requirement fulfillment for different releases.
### .req
This record defines one requirement.
Attributes:
* descr - text string which describes the requirement. Ie, "General requirement for requirements"
* def - text string which defines the requirement. Normally a short sentence which "Shall" in it. Ie, "Requirements shall be testable and possible to measure. A clear pass/fail criteria shall exist."
* test-spec: either a text string or a link to external document that defines how to test the requirement
* test: link to test report document that shows the test results
* stage:
* depends: list of links to other requirements which must be fulfilled for this requirement to be fulfilled
* category: link to category record
* release: link to the release where this requirement must be fulfilled in.
* level: which main_level this requirement belongs to
* background: string or link to external document. Describes the reason for the requirement. This can be a very helpful information later on when revisiting requirements.
# Examples
## 1. Minimal
```
.config:
top_category -> only_category
top_requirement -> top_req
.category only_category:
name: The one and only category
.req top_req:
descr: Top requirement which links all other requirements.
def: All dependant requirements shall be fulfilled and tested.
depends:
-> R2
.req R2:
descr: Example requirement
```
| /reqqie-0.0.5.tar.gz/reqqie-0.0.5/README.md | 0.535098 | 0.867766 | README.md | pypi |
# reqtry
A simple implementation of retries for [psf/request](https://github.com/psf/requests) python library. Retry code is based in retry project [invl/retry](https://github.com/invl/retry)
It has the same functions of request/api.py only retries functionality was added. (request, get, post, put, patch, delete, options, head)
## Installation
```bash
$ pip install reqtry
```
## Api
### Request
```python
def request(method, url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Executes a request and retries it if it failed.
:param tries: the maximum number of attempts. default: 1.
:param delay: initial delay between attempts. default: 0.
:param max_delay: the maximum value of delay. default: None (no limit).
:param backoff: multiplier applied to delay between attempts. default: 1 (no backoff).
:param jitter: extra seconds added to delay between attempts. default: 0.
fixed if a number, random if a range tuple (min, max)
:param logger: logger.warning(fmt, error, delay) will be called on failed attempts.
default: retry.logging_logger. if None, logging is disabled.
    :param raise_for_status: (optional) Boolean. Enable/disable raising an error when the response HTTP status code
        is a 4xx or a 5xx. Defaults to ``True``.
    :returns: the resulting :class:`Response <Response>` object."""
```
### Get
```python
def get(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
### Post
```python
def post(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
### Put
```python
def put(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
### Patch
```python
def patch(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
### Delete
```python
def delete(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
### Options
```python
def options(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
### Head
```python
def head(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None, tries=1, delay=0, max_delay=None, backoff=1, jitter=0,
logger=logging_logger, raise_for_status=True)
```
## Examples
```python
import reqtry
reqtry.get(url, cookies=self._cookies, timeout=(3, 3), tries=3, delay=1)
'''Raise error after 3 attempts, sleep 1 seconds between attempts.'''
reqtry.get(url, cookies=self._cookies, timeout=(3, 3), delay=1, backoff=2, max_delay=8)
'''Raise error after 3 attempts, sleep 1, 2, 4 and 8 seconds between attempts.'''
reqtry.get(url, cookies=self._cookies, timeout=(3, 3), delay=1, max_delay=4, jitter=1)
'''Raise error after 3 attempts, sleep 1, 2, 3 and 4 seconds between attempts.'''
```
| /reqtry-0.0.2.tar.gz/reqtry-0.0.2/README.md | 0.877043 | 0.823825 | README.md | pypi |
import json
import queue
from datetime import datetime
from threading import Thread
from urllib import parse, request
def boosted_requests(
    urls,
    no_workers=32,
    max_tries=5,
    after_max_tries="assert",
    timeout=10,
    headers=None,
    data=None,
    verbose=True,
    parse_json=True,
):
    """
    Get data from APIs in parallel by creating workers that process in the background
    :param urls: list of URLS
    :param no_workers: maximum number of parallel processes {Default::32}
    :param max_tries: Maximum number of tries before failing for a specific URL {Default::5}
    :param after_max_tries: What to do if not successful after "max_tries" for a specific URL,
                            one of {"assert", "break"} {Default::assert}
    :param timeout: Waiting time per request {Default::10}
    :param headers: Headers if any for the URL requests
    :param data: data if any for the URL requests (Wherever not None a POST request is made)
    :param verbose: Show progress [True or False] {Default::True}
    :param parse_json: Parse response to json [True or False] {Default::True}
    :return: List of response for each API (order is maintained)
    :raises AssertionError: when headers/data lengths do not match urls, or when a
        URL exhausts ``max_tries`` and ``after_max_tries`` is "assert"
    """
    start = datetime.now()

    def _printer(inp, end=""):
        # Single-line progress output, prefixed with elapsed seconds; the
        # leading \r rewrites the same terminal line on each call.
        print(
            f"\r::{(datetime.now() - start).total_seconds():.2f} seconds::",
            str(inp),
            end=end,
        )

    class GetRequestWorker(Thread):
        def __init__(
            self, request_queue, max_tries=5, after_max_tries="assert", timeout=10, verbose=True, parse_json=True
        ):
            """
            Workers that can pull data in the background
            :param request_queue: queue of the dict containing the URLs
            :param max_tries: Maximum number of tries before failing for a specific URL
            :param after_max_tries: What to do if not successful after "max_tries" for a specific URL,
                            one of {"assert", "break"} {Default::assert}
            :param timeout: Waiting time per request
            :param verbose: Show progress [True or False]
            :param parse_json: Parse response to json [True or False]
            """
            Thread.__init__(self)
            self.queue = request_queue
            # Per-worker results keyed by the original position of the URL.
            self.results = {}
            self.max_tries = max_tries
            assert str(after_max_tries).lower() in {"assert", "break"}, """
            'after_max_tries' param CANNOT be anything that you want!
            :param after_max_tries: What to do if not successfull after "max_tries" for a specific URL,
                            one of {"assert", "break"} {Default::assert}
            """
            self.after_max_tries = str(after_max_tries).lower()
            self.timeout = timeout
            self.verbose = verbose
            self.parse_json = parse_json

        def run(self):
            while True:
                if self.verbose:
                    _printer(f">> {self.queue.qsize()} requests left", end="")
                # Bug fix: the previous qsize()-then-get() pattern could block a
                # worker forever when another worker drained the queue between
                # the two calls. get_nowait() makes check-and-take atomic.
                try:
                    content = self.queue.get_nowait()
                except queue.Empty:
                    break
                url = content["url"]
                header = content["header"]
                num_tries = content["retry"]
                data = content["data"]
                loc = content["loc"]
                if num_tries >= self.max_tries:
                    if self.after_max_tries == "break":
                        break
                    # Explicit raise instead of an always-false `assert`, so the
                    # failure survives `python -O` while keeping the original
                    # AssertionError type for callers.
                    raise AssertionError(
                        f"Maximum number of attempts reached {self.max_tries} for {content}"
                    )
                try:
                    if data is not None:
                        # Non-None data means a POST with a urlencoded body.
                        data = parse.urlencode(data).encode()
                        _request = request.Request(url, data=data)
                    else:
                        _request = request.Request(url)
                    for k, v in header.items():
                        _request.add_header(k, v)
                    response = request.urlopen(_request, timeout=self.timeout)
                except Exception:
                    # Network/HTTP failure: requeue with an incremented retry count.
                    content["retry"] += 1
                    self.queue.put(content)
                    continue
                if response.getcode() == 200:
                    raw = response.read()
                    encoding = response.info().get_content_charset("utf-8")
                    decoded_data = raw.decode(encoding)
                    self.results[loc] = (
                        json.loads(decoded_data) if self.parse_json else decoded_data
                    )
                    self.queue.task_done()
                else:
                    # Non-200 response: treat like a failure and retry.
                    content["retry"] += 1
                    self.queue.put(content)

    if headers is None:
        headers = [{} for _ in range(len(urls))]
    if data is None:
        data = [None for _ in range(len(urls))]
    assert len(headers) == len(
        urls
    ), "Length of headers and urls need to be same OR headers needs to be None"
    assert len(data) == len(
        urls
    ), "Length of data and urls need to be same OR data needs to be None (in case of GET)"
    # Each queue entry carries everything a worker needs, plus `loc` so the
    # response order can be reconstructed at the end.
    url_q = queue.Queue()
    for i in range(len(urls)):
        url_q.put(
            {
                "url": urls[i],
                "retry": 0,
                "header": headers[i],
                "loc": i,
                "data": data[i],
            }
        )
    workers = []
    # Never spawn more workers than there are URLs.
    for _ in range(min(url_q.qsize(), no_workers)):
        worker = GetRequestWorker(
            url_q,
            max_tries=max_tries,
            after_max_tries=after_max_tries,
            timeout=timeout,
            verbose=verbose,
            parse_json=parse_json,
        )
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    # Merge per-worker results and restore the original URL order; URLs that
    # never succeeded (after_max_tries="break") come back as None.
    ret = {}
    for worker in workers:
        ret.update(worker.results)
    if verbose:
        _printer(">> DONE")
    return [ret.get(_) for _ in range(len(urls))]
from typing import Dict, List
# User-agent string for desktop Chrome 101 on Windows 10 (64-bit).
CHROME_UA: str = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
    "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36"
)

# Default header set mimicking a top-level Chrome 101 navigation request.
CHROME_HEADERS: Dict[str, str] = {
    "sec-ch-ua": ' Not A;Brand";v="99", "Chromium";v="101", "Google Chrome";v="101"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": CHROME_UA,
    "Accept": (
        "text/html,application/xhtml+xml,application/xml;q=0.9,"
        "image/avif,image/webp,image/apng,*/*;q=0.8,"
        "application/signed-exchange;v=b3;q=0.9"
    ),
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-User": "?1",
    "Sec-Fetch-Dest": "document",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "en-US,en;q=0.9",
}

# Cipher suites offered by Chrome, in offer order (OpenSSL-style names).
CHROME_CIPHER_SUITE: List[str] = [
    "TLS_AES_128_GCM_SHA256",
    "TLS_AES_256_GCM_SHA384",
    "TLS_CHACHA20_POLY1305_SHA256",
    "ECDHE-ECDSA-AES128-GCM-SHA256",
    "ECDHE-RSA-AES128-GCM-SHA256",
    "ECDHE-ECDSA-AES256-GCM-SHA384",
    "ECDHE-RSA-AES256-GCM-SHA384",
    "ECDHE-ECDSA-CHACHA20-POLY1305",
    "ECDHE-RSA-CHACHA20-POLY1305",
    "ECDHE-RSA-AES128-SHA",
    "ECDHE-RSA-AES256-SHA",
    "AES128-GCM-SHA256",
    "AES256-GCM-SHA384",
    "AES128-SHA",
    "AES256-SHA",
]

# Cipher suites offered by Firefox 98, in offer order (NSS-style names).
FIREFOX98_CIPHER_SUITE: List[str] = [
    "aes_128_gcm_sha_256",
    "chacha20_poly1305_sha_256",
    "aes_256_gcm_sha_384",
    "ecdhe_ecdsa_aes_128_gcm_sha_256",
    "ecdhe_rsa_aes_128_gcm_sha_256",
    "ecdhe_ecdsa_chacha20_poly1305_sha_256",
    "ecdhe_rsa_chacha20_poly1305_sha_256",
    "ecdhe_ecdsa_aes_256_gcm_sha_384",
    "ecdhe_rsa_aes_256_gcm_sha_384",
    "ecdhe_ecdsa_aes_256_sha",
    "ecdhe_ecdsa_aes_128_sha",
    "ecdhe_rsa_aes_128_sha",
    "ecdhe_rsa_aes_256_sha",
    "rsa_aes_128_gcm_sha_256",
    "rsa_aes_256_gcm_sha_384",
    "rsa_aes_128_sha",
    "rsa_aes_256_sha",
]

# User-agent string for desktop Firefox 98 on Windows 10 (64-bit).
FIREFOX98_UA: str = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0"
)

# Default header set mimicking a top-level Firefox 98 navigation request.
FIREFOX98_HEADERS: Dict[str, str] = {
    "User-Agent": FIREFOX98_UA,
    "Accept": (
        "text/html,application/xhtml+xml,"
        "application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8"
    ),
    "Accept-Language": "en-US,en;q=0.5",
    "Accept-Encoding": "gzip, deflate, br",
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "TE": "Trailers",
}
from validator import Validator
from jsonpath_ng import parse
import logging
from typing import List
from flask import request, abort
from functools import wraps
# Module-level logger. Note: forcing DEBUG here makes this library verbose
# unless the host application reconfigures or filters this logger.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class ValidationError(Exception):
    """Raised when a request payload fails validation against the filter groups."""
class RequestFilter(object):
    """Validates request payloads against groups of JSON-path keyed Validator rules."""

    @classmethod
    def validate(cls, data: dict, filter_groups: List[dict]) -> bool:
        """
        Returns True if at least one filter group is validated or raises a
        validation exception if none of the filter groups are validated.
        Arguments:
            data: Dictionary of request
            filter_groups: List of filter groups containing JSON path keys to
                parse `data` with and Validator rules to validate with.
        JSON path syntax: https://goessner.net/articles/JsonPath/
        Validator rules syntax: https://github.com/CSenshi/Validator/blob/master/RULES.md
        :raises ValidationError: carrying the per-group error dicts collected
            from every failed filter group.
        """
        failures = []
        for group in filter_groups:
            log.debug(f"Filter group:\n{group}")
            results = {}
            for path in group:
                try:
                    # First JSON-path match wins; a missing path maps to "" so
                    # rules such as "required" fail cleanly instead of raising.
                    results[path] = [match.value for match in parse(path).find(data)][0]
                except IndexError:
                    results[path] = ""
            log.debug(f"Request mapping:\n{results}")
            val = Validator(results, group)
            is_valid = val.validate()
            log.debug(f"Validated data:\n{val.get_validated_data()}")
            errors = val.get_errors()
            log.debug(f"Validation errors: {errors}")
            if is_valid:
                return True
            failures.append(errors)
        # Bug fix: previously the error was raised inside the loop, so only the
        # first filter group was ever evaluated and the documented "at least one
        # group" semantics never applied. Raise only after every group failed.
        raise ValidationError(failures)

    def request_filter_groups(self, filter_groups: List[dict], flask=False):
        """
        Decorator function that returns the passed function with original arguments
        if validation is successful or returns a validation error response.
        filter_groups: List of filter groups containing JSON path keys to
            parse `data` with and Validator rules to validate with.
        flask: When True, the payload is read from `flask.request.json` and
            failures abort with HTTP 422; otherwise the payload is the wrapped
            function's first positional argument and a 422 response dict is
            returned on failure.
        JSON path syntax: https://goessner.net/articles/JsonPath/
        Validator rules syntax: https://github.com/CSenshi/Validator/blob/master/RULES.md
        """
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                if flask:
                    data = request.json
                else:
                    data = args[0]
                try:
                    self.validate(data, filter_groups)
                except ValidationError as e:
                    log.error(e, exc_info=True)
                    if flask:
                        abort(422, e)
                    else:
                        return {"statusCode": 422, "body": e}
                return func(*args, **kwargs)
            return wrapper
        return decorator
from base64 import (
b64encode,
)
from enum import (
IntEnum,
)
import json
from tempfile import (
TemporaryFile,
)
import urllib
import pyqrcode as pyqrcode
from web3 import Web3
from request_network.constants import (
EMPTY_BYTES_20,
PAYMENT_GATEWAY_BASE_URL,
)
from request_network.exceptions import (
RequestNotFound,
)
class Roles(IntEnum):
    """Which party to a Request the caller/signer represents."""
    PAYER = 0
    PAYEE = 1
class States(IntEnum):
    """Lifecycle state of a Request."""
    # Request might not have been broadcast, or has been broadcast but not confirmed
    PENDING = -1
    CREATED = 0
    ACCEPTED = 1
    CANCELED = 2
class Payment(object):
    """A single balance update applied to one payee of a Request."""

    # Class-level defaults; overwritten per instance in __init__.
    payee_index = None
    delta_amount = None

    def __init__(self, payee_index, delta_amount):
        # Position of the payee within the Request's payee list.
        self.payee_index = payee_index
        # Change in the amount recorded as paid to that payee.
        self.delta_amount = delta_amount

    def __repr__(self):
        return f"{self.payee_index}:{self.delta_amount}"
class Payee(object):
    """One recipient of a Request: identity address, payment target and amounts."""

    def __init__(self, id_address, amount, payment_address=None,
                 additional_amount=None, payment_amount=None, balance=None,
                 paid_amount=None):
        """
        :param id_address: identity address of the payee (checksummed on assignment)
        :param amount: Expected amount to be paid for this Request
        :param payment_address: optional address that payments are sent to
        :param additional_amount: Extra amount, on top of expected amount
        :param payment_amount: Amount to pay at creation when Request is created by payer
        :param balance: current balance recorded for this payee
        :param paid_amount: Track how much has been paid to this payee
        """
        self.id_address = Web3.toChecksumAddress(id_address)
        self.payment_address = (
            Web3.toChecksumAddress(payment_address) if payment_address else None
        )
        self.amount = amount
        # TODO these are only set when using create_request_as_payer. Better place for them?
        self.additional_amount = additional_amount or 0
        self.payment_amount = payment_amount or 0
        # TODO when is balance used?
        self.balance = balance or 0
        self.paid_amount = paid_amount or 0

    @property
    def is_paid(self):
        """ True once the total received meets or exceeds the expected amount.
        The paid amount can exceed the expected amount when the payer supplied
        additional payments.
        """
        # TODO rather naive, needs to be tested with additionals/subtractions
        return self.paid_amount >= self.amount
class Payer(object):
    """ Represents a Payer. BitcoinRefundAddresses not yet supported.
    If `refund_address` is not given `id_address` is used as the refund address.
    """

    def __init__(self, id_address, refund_address=None):
        self.id_address = Web3.toChecksumAddress(id_address)
        if refund_address:
            self.refund_address = Web3.toChecksumAddress(refund_address)
        else:
            # Default: refunds go back to the payer's identity address.
            self.refund_address = self.id_address
class Request(object):
    """ A Request: either one retrieved from the blockchain, or a locally
    generated Signed Request that has not been broadcast yet.
    """
    def __init__(self, currency_contract_address, payees, ipfs_hash=None, id=None, data=None,
                 payer=None, state=None, payments=None, creator=None,
                 expiration_date=None, signature=None, _hash=None,
                 transaction_hash=None):
        """ Represents a Request which may be in one of multiple states:
        - a Request that was retrieved from the blockchain
        - a Signed Request that has been generated locally but does not exist on-chain
        :param currency_contract_address: address of the currency service contract
        :param payees: list of Payee instances
        :param ipfs_hash: IPFS hash under which `data` is stored (if any)
        :param id: Request ID (None for a not-yet-broadcast Signed Request)
        :param data: dictionary of extra data stored on IPFS
        :param payer: address (or Payer) paying the Request
        :param state: one of the `States` values
        :param payments: list of Payment instances applied so far
        :param creator: address that created the Request on-chain
        :param expiration_date: unix timestamp after which a Signed Request
            can no longer be broadcast
        :param signature: signature over the Request hash (Signed Requests)
        :param _hash: the Request hash (Signed Requests)
        :param transaction_hash: hash of the transaction that created the Request
        """
        self.id = id
        self.currency_contract_address = currency_contract_address
        self.payer = payer
        self.payees = payees
        self.ipfs_hash = ipfs_hash if ipfs_hash else ''
        self.data = data if data else {}
        self.state = state
        self.expiration_date = expiration_date
        self.signature = signature
        self.hash = _hash
        self.payments = payments if payments else []
        self.creator = creator
        self.transaction_hash = transaction_hash

    @property
    def amounts(self):
        # Expected amount for each payee, in payee order.
        return [p.amount for p in self.payees]

    @property
    def payment_addresses(self):
        # Payment address per payee; payees without one are represented by
        # the empty 20-byte address placeholder.
        return [p.payment_address if p.payment_address else EMPTY_BYTES_20
                for p in self.payees]

    @property
    def id_addresses(self):
        # Identity address for each payee, in payee order.
        return [p.id_address for p in self.payees]

    @property
    def is_broadcast(self):
        """ Returns True if this Request can be successfully retrieved from the blockchain.
        """
        # Imported here (not at module level) to avoid a circular import with
        # request_network.api, which imports this module.
        from request_network.api import RequestNetwork
        request_api = RequestNetwork()
        try:
            request = request_api.get_request_by_transaction_hash(self.transaction_hash)
        except RequestNotFound:
            return False
        # TODO validate other parameters? Hash the Request and compare hashes?
        return request.id == self.id

    @property
    def is_paid(self):
        """ Returns True if all payees have received their expected amounts.
        """
        return all([p.is_paid for p in self.payees])

    def as_base64(self, callback_url, ethereum_network_id):
        """ Return the base64-encoded JSON string required by the payment gateway.
        :param callback_url: URL the gateway redirects to after payment
        :param ethereum_network_id: numeric ID of the Ethereum network
        :return: base64-encoded JSON payload for the gateway
        :raises Exception: if the Request has no hash or no signature yet
        """
        if not self.hash:
            raise Exception('Can not base64 encode a Request with no hash')
        if not self.signature:
            raise Exception('Can not base64 encode a Request with no signature')
        return b64encode(json.dumps({
            'signedRequest': {
                'currencyContract': self.currency_contract_address,
                # Although the parameter is called data, it is expecting the ipfs_hash.
                'data': self.ipfs_hash,
                # Amounts are serialized as strings for the gateway.
                'expectedAmounts': [str(a) for a in self.amounts],
                'expirationDate': self.expiration_date,
                'hash': self.hash,
                'payeesIdAddress': self.id_addresses,
                # The gateway expects None (JSON null) rather than the empty
                # 20-byte placeholder for payees without a payment address.
                'payeesPaymentAddress': [
                    None if a == EMPTY_BYTES_20 else a for a in self.payment_addresses],
                'signature': self.signature
            },
            'callbackUrl': callback_url,
            'networkId': ethereum_network_id
        }).encode('utf-8')).decode()

    def get_payment_gateway_url(self, callback_url, ethereum_network_id):
        # URL-quote the base64 payload and append it to the gateway base URL.
        return '{}{}'.format(
            PAYMENT_GATEWAY_BASE_URL,
            urllib.parse.quote(self.as_base64(callback_url, ethereum_network_id))
        )

    def write_qr_code(self, f, callback_url, ethereum_network_id, pyqrcode_kwargs=None):
        """ Generate a QR code containing a URL to pay this Request via the payment gateway, and
        write it to file-like object `f`.
        """
        url = self.get_payment_gateway_url(
            callback_url=callback_url,
            ethereum_network_id=ethereum_network_id)
        kwargs = pyqrcode_kwargs if pyqrcode_kwargs else {}
        # Use 2 as the default scale. Request's URLs are quite long so they result in
        # pixel-dense QR codes.
        kwargs['scale'] = 2 if 'scale' not in kwargs else kwargs['scale']
        qr_code = pyqrcode.create(url)
        qr_code.png(f, **kwargs)
        # Rewind so callers can read the PNG straight back from `f`.
        f.seek(0)

    def get_qr_code_data_uri(self, callback_url, ethereum_network_id, pyqrcode_kwargs=None):
        """ Return a link to the payment gateway as a data URI, suitable for inclusion in an
        <img> tag.
        """
        # TODO remove this and other QR code function, give example in docs.
        # no point including png/pyqrcode for a function that is only a few lines
        with TemporaryFile() as f:
            self.write_qr_code(f, callback_url, ethereum_network_id, pyqrcode_kwargs)
            encoded_uri = b64encode(f.read())
        # NOTE(review): "text/png" looks like it should be "image/png" — confirm
        # what downstream consumers of this data URI expect before changing it.
        mime = "text/png;"
        return "data:%sbase64,%s" % (mime, encoded_uri.decode())
from collections import (
defaultdict,
namedtuple,
)
from unittest import (
mock,
)
from eth_abi import (
decode_abi,
)
from eth_abi.decoding import (
StringDecoder,
decode_uint_256,
)
from eth_utils import (
event_abi_to_log_topic,
)
from web3 import Web3
from web3.auto import (
w3,
)
from web3.utils.datastructures import (
AttributeDict,
)
from web3.utils.events import (
get_event_data,
)
from request_network.artifact_manager import (
ArtifactManager,
)
from request_network.constants import (
EMPTY_BYTES_20,
)
from request_network.exceptions import (
RequestNotFound,
RoleNotSupported,
TransactionNotFound,
)
from request_network.types import (
Payee,
Payment,
Request,
Roles,
)
from request_network.utils import (
get_service_for_currency,
retrieve_ipfs_data,
)
class RequestNetwork(object):
    """ The main interaction point with the Request Network API.
    """
    def create_request(self, role, currency, payees, payer, data=None):
        """ Create a Request.
        If the Request is being created by the payer it is possible to create
        it and pay in a single transaction, using the `Payee.payment_amount`
        and `Payee.additional_amount` parameters.
        Note that this method broadcasts the transaction which will create the
        Request, but does not confirm that the transaction is included in the
        block. The Request's `is_broadcast` property can be used to check this -
        it will be True if the Request can be successfully retrieved from
        the blockchain using its transaction hash.
        :param role: Role of the caller, i.e. Payer or Payee
        :type role: types.Roles.PAYEE
        :param currency: The currency in which payment will be made
        :type currency: currency.Currency
        :param payees: List of Payees
        :type payees: [types.Payee]
        :param payer: A Payer representing the address which will pay the Request
        :type payer: types.Payer
        :param data: Optional dictionary of data which will be stored on IPFS
        :type data: dict
        :return: The transaction hash of the transaction which, if successfully
            included in a block, will create this Request.
        """
        # Arguments common to payee- and payer-initiated creation.
        service_args = {
            'payer_id_address': payer.id_address,
            'payer_refund_address': payer.refund_address,
            'id_addresses': [payee.id_address for payee in payees],
            'payment_addresses': [payee.payment_address for payee in payees],
            'amounts': [payee.amount for payee in payees],
            'data': data
        }
        service = get_service_for_currency(currency)
        if role == Roles.PAYEE:
            method = getattr(service, 'create_request_as_payee')
        elif role == Roles.PAYER:
            method = getattr(service, 'create_request_as_payer')
            # Payer-initiated Requests can also be (partially) paid at creation.
            service_args['additional_payments'] = [p.additional_amount for p in payees]
            service_args['creation_payments'] = [p.payment_amount for p in payees]
        else:
            raise RoleNotSupported('{} is not a valid role'.format(role))
        return method(**service_args)

    def create_signed_request(self, role, currency, payees,
                              expiration_date, data=None):
        """ Create a signed Request instance
        :param role: Role of the signer - payer or payee (currently only payee is supported)
        :type role: types.Roles.PAYEE
        :param currency: The currency in which payment will be made
        :type currency: currency.Currency
        :param payees: List of Payee objects
        :type payees: [types.Payee]
        :param expiration_date: Unix timestamp after which Request can no longer be broadcast
        :param data: Optional dictionary of data which will be stored on IPFS
        :type data: dict
        :return: A Request instance
        :rtype: request_network.types.Request
        """
        if role != Roles.PAYEE:
            raise NotImplementedError('Signing Requests as the payer is not yet supported')
        service_args = {
            'id_addresses': [payee.id_address for payee in payees],
            'payment_addresses': [payee.payment_address for payee in payees],
            'amounts': [payee.amount for payee in payees],
            'expiration_date': expiration_date,
            'data': data
        }
        service = get_service_for_currency(currency)
        return service.sign_request_as_payee(**service_args)

    def broadcast_signed_request(self, signed_request, payer_address, payment_amounts=None,
                                 additional_payments=None):
        """ Broadcast a signed Request.
        Currently the Request API only supports signing requests as the payee,
        therefore this function only supports broadcasting a signed request
        as the payer.
        :param signed_request: The previously-signed Request to broadcast
        :type signed_request: types.Request
        :param payer_address: address which pays (and broadcasts) the Request
        :param payment_amounts: A list of integers specifying how much should be
            paid to each payee when the Request is created.
            The amount is in the currency of the Request.
        :type payment_amounts: [int]
        :param additional_payments: Additional amounts to pay on top of the `payment_amounts`.
        :type additional_payments: [int]
        :return: The transaction hash of the transaction which, if successfully
            included in a block, will create (and possibly pay, depending on `payment_amounts`)
            this Request.
        """
        # The currency contract address embedded in the signed Request tells us
        # which service class knows how to broadcast it.
        am = ArtifactManager()
        service_class = am.get_service_class_by_address(signed_request.currency_contract_address)
        # currency = signed_request.currency_cont
        service_args = {
            'signed_request': signed_request,
            'payment_amounts': payment_amounts,
            'additional_payments': additional_payments,
            'payer_address': payer_address
        }
        service = service_class()
        return service.broadcast_signed_request_as_payer(**service_args)

    def get_request_by_id(self, request_id, block_number=None):
        """ Get a Request from its ID.
        :param request_id: The Request ID as a 32 byte hex string
        :param block_number: If provided, only search for Created events from this block onwards.
        :return: A Request instance
        :rtype: request_network.types.Request
        :raises RequestNotFound: if the ID embeds an invalid core contract
            address or the Request does not exist on that contract.
        """
        # The first 20 bytes of a Request ID are the core contract's address.
        core_contract_address = Web3.toChecksumAddress(request_id[:42])
        am = ArtifactManager()
        core_contract_data = am.get_contract_data(core_contract_address)
        core_contract = w3.eth.contract(
            address=core_contract_address,
            abi=core_contract_data['abi'])
        # Converts the data returned from 'RequestCore:getRequest' into a friendly object
        RequestContractData = namedtuple('RequestContractData', [
            'payer_address', 'currency_contract_address', 'state',
            'payee_id_address', 'amount', 'balance'
        ])
        try:
            request_data = RequestContractData(*core_contract.functions.getRequest(
                request_id).call())
        except ValueError:
            # web3 will raise a ValueError if the contract at core_contract_address is not
            # a valid contract address. This could happen if the given Request ID contains
            # an invalid core_contract_address, so we treat it as an invalid Request ID.
            raise RequestNotFound('Request ID {} has an invalid core contract address {}'.format(
                request_id,
                core_contract_address
            ))
        if request_data.payer_address == EMPTY_BYTES_20:
            raise RequestNotFound('Request ID {} not found on core contract {}'.format(
                request_id,
                core_contract_address
            ))
        # Payment addresses for payees are not stored with the Request in the contract,
        # so they need to be looked up separately
        service_contract = am.get_contract_instance(request_data.currency_contract_address)
        # The "main" payee is stored with the Request itself; index 0 on the
        # service contract holds its payment address.
        payees = [
            Payee(
                id_address=request_data.payee_id_address,
                amount=request_data.amount,
                balance=request_data.balance,
                payment_address=service_contract.functions.payeesPaymentAddress(
                    request_id, 0).call()
            )
        ]
        # Sub-payees live in a separate mapping; their payment addresses are
        # offset by one (index 0 is the main payee).
        sub_payees_count = core_contract.functions.getSubPayeesCount(request_id).call()
        for i in range(sub_payees_count):
            (address, amount, balance) = core_contract.functions.subPayees(request_id, i).call()
            payment_address = service_contract.functions.payeesPaymentAddress(
                request_id, i + 1).call()
            payees.append(Payee(
                id_address=address,
                payment_address=payment_address,
                balance=balance,
                amount=amount
            ))
        # To find the creator and data for a Request we need to find the Created event
        # that was emitted when the Request was created
        # web3.py provides helpers for getting logs for a specific contract event but
        # they rely on `eth_newFilter` which is not supported on Infura. As a workaround
        # the logs are retrieved with `web3.eth`getLogs` which does not require a new
        # filter to be created.
        created_event_signature = Web3.toHex(event_abi_to_log_topic(
            event_abi=core_contract.events.Created().abi
        ))
        logs = w3.eth.getLogs({
            'fromBlock': block_number if block_number else core_contract_data['block_number'],
            'address': core_contract_address,
            'topics': [created_event_signature, request_id]
        })
        assert len(logs) == 1, "Incorrect number of logs returned"
        # Work around Solidity bug. See note in read_padded_data_from_stream.
        with mock.patch.object(
                StringDecoder,
                'read_data_from_stream',
                new=read_padded_data_from_stream):
            created_event_data = get_event_data(
                event_abi=core_contract.events.Created().abi,
                log_entry=logs[0]
            )
        # creator = log_data.args.creator
        # See if we have an IPFS hash, and get the file if so
        if created_event_data.args.data != '':
            ipfs_hash = created_event_data.args.data
            data = retrieve_ipfs_data(ipfs_hash)
        else:
            ipfs_hash = None
            data = {}
        # Iterate through UpdateBalance events to build a list of payments made for this request
        updated_event_signature = Web3.toHex(event_abi_to_log_topic(
            event_abi=core_contract.events.UpdateBalance().abi
        ))
        logs = w3.eth.getLogs({
            'fromBlock': block_number if block_number else core_contract_data['block_number'],
            'address': core_contract_address,
            'topics': [updated_event_signature, request_id]
        })
        payments = []
        for log in logs:
            event_data = get_event_data(
                event_abi=core_contract.events.UpdateBalance().abi,
                log_entry=log
            )
            payments.append(Payment(
                payee_index=event_data.args.payeeIndex,
                delta_amount=event_data.args.deltaAmount
            ))
            # Keep each payee's running paid total in sync with its payments.
            payees[event_data.args.payeeIndex].paid_amount += event_data.args.deltaAmount
        return Request(
            id=request_id,
            state=request_data.state,
            creator=created_event_data.args.creator,
            currency_contract_address=request_data.currency_contract_address,
            payer=request_data.payer_address,
            payees=payees,
            payments=payments,
            ipfs_hash=ipfs_hash,
            data=data,
            transaction_hash=Web3.toHex(created_event_data.transactionHash)
        )

    def get_request_by_transaction_hash(self, transaction_hash):
        """ Get a Request from an Ethereum transaction hash.
        :param transaction_hash: The hash of the transaction which created the Request
        :return: A Request instance
        :rtype: request_network.types.Request
        :raises TransactionNotFound: if the transaction does not exist.
        """
        tx_data = w3.eth.getTransaction(transaction_hash)
        if not tx_data:
            raise TransactionNotFound(transaction_hash)
        am = ArtifactManager()
        currency_contract = am.get_contract_instance(tx_data['to'])
        # Decode the transaction input data to get the function arguments
        # (first 10 chars of the input are the 4-byte function selector).
        func = currency_contract.get_function_by_selector(tx_data['input'][:10])
        arg_types = [i['type'] for i in func.abi['inputs']]
        arg_names = [i['name'] for i in func.abi['inputs']]
        arg_values = decode_abi(arg_types, Web3.toBytes(hexstr=tx_data['input'][10:]))
        function_args = dict(zip(arg_names, arg_values))
        # If this is a 'simple' Request we can take the ID from the transaction input.
        if '_requestId' in function_args:
            return self.get_request_by_id(
                Web3.toHex(function_args['_requestId']))
        # For more complex Requests (e.g. those created by broadcasting a signed Request)
        # we need to find the 'Created' event log that was emitted and take the ID from there.
        tx_receipt = w3.eth.getTransactionReceipt(transaction_hash)
        if not tx_receipt:
            raise Exception('TODO could not get tx receipt')
        # Extract the event args from the tx_receipt to retrieve the request_id
        core_contract = am.get_contract_instance(tx_receipt['logs'][0].address)
        # Work around Solidity bug. See note in read_padded_data_from_stream.
        with mock.patch.object(
                StringDecoder,
                'read_data_from_stream',
                new=read_padded_data_from_stream):
            logs = core_contract.events.Created().processReceipt(tx_receipt)
        request_id = logs[0].args.requestId
        return self.get_request_by_id(
            Web3.toHex(request_id),
            block_number=tx_data['blockNumber'])

    def get_request_events(self, request_id, from_block=None, to_block=None):
        """ Return a list of Events relating to the given request_id.
        The events follow the format used by `web3.utils.events.get_event_data`,
        except we decode the Request ID, transaction hash and block hash
        from bytes to hex strings.
        :param request_id: the Request ID as a 32 byte hex string
        :param from_block: first block to search (defaults to the core
            contract's deployment block)
        :param to_block: last block to search (defaults to the latest block)
        :return: list of events sorted by block number, then log index
        """
        core_contract_address = Web3.toChecksumAddress(request_id[:42])
        am = ArtifactManager()
        core_contract_data = am.get_contract_data(core_contract_address)
        core_contract = core_contract_data['instance']
        event_names = ['Created', 'Accepted', 'Canceled', 'UpdateBalance',
                       'UpdateExpectedAmount', 'NewSubPayee']
        # Iterate through all available events, retrieving matching logs for
        # each. Iterate through logs and decode them according to the event's
        # ABI so we get an AttrDict containing all event data.
        # Events are temporarily stored in a dict(dict(list)) structure, so
        # they can be indexed by block number and log index. This is
        # required so we can return events in the order they were broadcast.
        events = defaultdict(lambda: defaultdict(list))
        for event_name in event_names:
            event_obj = getattr(core_contract.events, event_name)
            event_signature = Web3.toHex(event_abi_to_log_topic(
                event_abi=event_obj().abi))
            logs = w3.eth.getLogs({
                'fromBlock': from_block if from_block else core_contract_data['block_number'],
                'toBlock': to_block if to_block else w3.eth.blockNumber,
                'address': core_contract_data['address'],
                'topics': [event_signature, request_id]
            })
            for log in logs:
                # get_event_data returns an immutable AttributeDict, but we want
                # to decode some of the args. So we convert it to a dict, decode
                # some data, and then return a new AttributeDict.
                event_data = get_event_data(
                    event_abi=event_obj().abi,
                    log_entry=log
                ).__dict__
                event_data['args'] = event_data['args'].__dict__
                # Convert request ID, tx/block hash from bytes -> hex string
                event_data['args']['requestId'] = Web3.toHex(event_data['args']['requestId'])
                event_data['blockHash'] = Web3.toHex(event_data['blockHash'])
                event_data['transactionHash'] = Web3.toHex(event_data['transactionHash'])
                event_data['args'] = AttributeDict(dictionary=event_data['args'])
                events[event_data['blockNumber']][event_data['logIndex']].append(
                    AttributeDict(dictionary=event_data)
                )
        # Iterate through blocks, logs, and events to get a list of sorted events
        sorted_events = []
        for block in sorted(events.keys()):
            for log_index in sorted(events[block].keys()):
                for event in events[block][log_index]:
                    sorted_events.append(event)
        return sorted_events

    def pay_request(self, request_id, amounts,
                    additional_payments=None,
                    transaction_options=None):
        """ Pay a Request.
        :param request_id: the Request ID as a 32 byte hex string
        :param amounts: amount to pay each payee, in payee order
        :param additional_payments: optional extra amounts per payee
            (defaults to zero for every payee)
        :param transaction_options: dict of transaction options; 'from' is
            used as the sending address
        :return: result of the service's pay_request call
        """
        # TODO validaton
        empty_payments = [0] * len(amounts)
        additional_payments = additional_payments if additional_payments else empty_payments
        request = self.get_request_by_id(request_id)
        service_args = {
            'request_id': request_id,
            'payment_amounts': amounts,
            'additional_payments': additional_payments,
            # TODO pass through entire transaction_options, refactor later
            'from_address': transaction_options['from']
        }
        service = self._get_service_for_request(request)
        return service.pay_request(**service_args)

    def _get_service_for_request(self, request):
        """ Returns an instance of the service class for a given Request.
        :param request: request_network.types.Request
        :return: instantiated service class for the Request's currency contract
        """
        am = ArtifactManager()
        service_class = am.get_service_class_by_address(request.currency_contract_address)
        # TODO fix this later so we can pass in token_address for ERC20 service
        return service_class()

    def accept_request(self, request_id, transaction_options):
        """ Accept a Request.
        :param request_id: the Request ID as a 32 byte hex string
        :param transaction_options: dict of transaction options; 'from' is
            used as the accepting (payer) address
        :return: result of the service's accept_request call
        """
        request = self.get_request_by_id(request_id)
        service = self._get_service_for_request(request)
        service_args = {
            'request_id': request_id,
            'payer_address': transaction_options['from']  # TODO
        }
        return service.accept_request(**service_args)

    def cancel_request(self, request_id, transaction_options):
        """ Cancel a Request.
        :param request_id: the Request ID as a 32 byte hex string
        :param transaction_options: dict of transaction options; 'from' is
            used as the sending address
        :return: result of the service's cancel_request call
        """
        request = self.get_request_by_id(request_id)
        service = self._get_service_for_request(request)
        service_args = {
            'request_id': request_id,
            'from_address': transaction_options['from']  # TODO
        }
        return service.cancel_request(**service_args)

    def refund_request(self, request_id, refund_amount, transaction_options):
        """ Refund a Request.
        :param request_id: the Request ID as a 32 byte hex string
        :param refund_amount: amount to refund
        :param transaction_options: dict of transaction options; 'from' is
            used as the sending address
        :return: result of the service's refund_request call
        """
        request = self.get_request_by_id(request_id)
        service = self._get_service_for_request(request)
        service_args = {
            'request_id': request_id,
            'refund_amount': refund_amount,
            'from_address': transaction_options['from']  # TODO
        }
        return service.refund_request(**service_args)

    def add_additional_payment(self, request_id, amounts, transaction_options):
        """ Add additional payments to an existing Request.
        :param request_id: the Request ID as a 32 byte hex string
        :param amounts: additional amounts per payee, in payee order
        :param transaction_options: dict of transaction options; 'from' is
            used as the sending address
        :return: result of the service's add_additional_payment call
        """
        request = self.get_request_by_id(request_id)
        service = self._get_service_for_request(request)
        service_args = {
            'request_id': request_id,
            'amounts': amounts,
            'from_address': transaction_options['from']  # TODO
        }
        return service.add_additional_payment(**service_args)

    def add_discount(self, request_id, amounts, transaction_options):
        """ Add a discount to an existing Request.
        :param request_id: the Request ID as a 32 byte hex string
        :param amounts: discount amounts per payee, in payee order
        :param transaction_options: dict of transaction options; 'from' is
            used as the sending address
        :return: result of the service's add_discount call
        """
        request = self.get_request_by_id(request_id)
        service = self._get_service_for_request(request)
        service_args = {
            'request_id': request_id,
            'amounts': amounts,
            'from_address': transaction_options['from']  # TODO
        }
        return service.add_discount(**service_args)
def read_padded_data_from_stream(self, stream):
    """ This function exists to work around a bug in Solidity:
    https://github.com/ethereum/web3.py/issues/602
    https://github.com/ethereum/solidity/issues/3493
    Data from logs differs if the event is emitted during an external
    or internal solidity function call.
    The workaround is to pad the data until it fits the padded length.
    :param self: instance this function is bound to (patched in as a method)
    :param stream: byte stream positioned at a length-prefixed data field
    :return: the payload bytes, with padding stripped
    """
    from eth_abi.utils.numeric import ceil32
    # The first 32-byte word holds the payload byte length; the payload
    # itself is stored right-padded to a 32-byte boundary.
    data_length = decode_uint_256(stream)
    padded_length = ceil32(data_length)
    data = stream.read(padded_length)
    # Start change
    # Manually pad data to force it to desired length
    # NOTE(review): this appends the full expected padding
    # (padded_length - data_length), not the number of bytes actually
    # missing — presumably the stream is only ever short by exactly the
    # padding amount; confirm against the linked issues.
    if len(data) < padded_length:
        data += b'\x00' * (padded_length - data_length)
    # End change
    # If the stream was genuinely truncated, the manual padding above will
    # not have reached padded_length, so the original error still fires.
    if len(data) < padded_length:
        from eth_abi.exceptions import InsufficientDataBytes
        raise InsufficientDataBytes(
            "Tried to read {0} bytes. Only got {1} bytes".format(
                padded_length,
                len(data),
            )
        )
    # Everything beyond the declared length must be zero padding.
    padding_bytes = data[data_length:]
    if padding_bytes != b'\x00' * (padded_length - data_length):
        from eth_abi.exceptions import NonEmptyPaddingBytes
        raise NonEmptyPaddingBytes(
            "Padding bytes were not empty: {0}".format(repr(padding_bytes))
        )
    return data[:data_length]
import time
from eth_account.messages import (
defunct_hash_message,
)
from web3 import Web3
from web3.auto import (
w3,
)
from request_network.artifact_manager import (
ArtifactManager,
)
from request_network.constants import (
EMPTY_BYTES_20,
)
from request_network.exceptions import (
InvalidRequestParameters,
)
from request_network.signers import (
environment_variable_hash_signer,
environment_variable_transaction_signer,
)
from request_network.types import (
Payee,
Request,
)
from request_network.utils import (
hash_request,
store_ipfs_data,
)
class RequestCoreService(object):
    """ Class for Request Core
    This class should not be used directly - instead, use a child class such as
    `RequestEthereumService` or `RequestERC20Service`.
    """
    def get_currency_contract_data(self):
        """ Return the currency contract for the given currency. `artifact_name` could
        be `last-RequestEthereum`, or `last-requesterc20-{token_address}`.
        """
        artifact_name = self._get_currency_contract_artifact_name()
        artifact_manager = ArtifactManager()
        contract_data = artifact_manager.get_contract_data(artifact_name)
        return contract_data
    def _get_currency_contract_artifact_name(self):
        """ Return the artifact name used when looking up the currency contract.
        Subclasses must override this.
        """
        raise NotImplementedError()
    def broadcast_signed_request_as_payer(self, signed_request, payer_address,
                                          payment_amounts=None, additional_payments=None):
        # Subclasses must override this.
        raise NotImplementedError()
    def create_request_as_payee(self, id_addresses, amounts,
                                payer_refund_address, payer_id_address,
                                payment_addresses=None,
                                data=None):
        """ Create a Request on-chain, initiated by the payee.
        :param id_addresses: Ethereum addresses identifying the payees
        :param amounts: expected payment amounts, one per payee
        :param payer_refund_address: address to which refunds are sent
        :param payer_id_address: Ethereum address identifying the payer
        :param payment_addresses: optional payment addresses; missing entries
            default to the zero address
        :param data: optional extra data, stored on IPFS
        :return: hex-encoded transaction hash
        """
        # validate request args
        if payment_addresses:
            payment_addresses = [
                Web3.toChecksumAddress(a) if a else EMPTY_BYTES_20 for a in payment_addresses
            ]
        else:
            payment_addresses = [EMPTY_BYTES_20] * len(id_addresses)
        # call collectEstimation on the currency contract
        ipfs_hash = store_ipfs_data(data) if data else ''
        # TODO better validation, more DRY
        # Validate Request parameters
        if len(id_addresses) != len(amounts):
            raise InvalidRequestParameters(
                'payees and amounts must be the same size'
            )
        if payment_addresses and len(id_addresses) < len(payment_addresses):
            raise InvalidRequestParameters(
                'payees can not be larger than payee_payment_addresses'
            )
        for amount in amounts:
            if int(amount) < 0:
                raise InvalidRequestParameters(
                    'amounts must be positive integers'
                )
        for address in id_addresses + payment_addresses:
            if not Web3.isAddress(address):
                raise InvalidRequestParameters(
                    '{} is not a valid Ethereum address'.format(address)
                )
        if payer_id_address == id_addresses[0]:
            raise InvalidRequestParameters(
                'Payer can not be the main payee'
            )
        # call fee estimator, set as value for tx
        currency_contract_data = self.get_currency_contract_data()
        currency_contract = currency_contract_data['instance']
        estimated_value = currency_contract.functions.collectEstimation(
            _expectedAmount=sum(a for a in amounts)
        ).call()
        # The main payee (first id address) sends the creation transaction.
        transaction_options = {
            'from': id_addresses[0],
            'value': estimated_value
        }
        # TODO update to use buildTransaction
        tx_hash = currency_contract.functions.createRequestAsPayee(
            _payeesIdAddress=id_addresses,
            _payeesPaymentAddress=payment_addresses,
            _expectedAmounts=amounts,
            _payer=payer_id_address,
            _payerRefundAddress=payer_refund_address,
            _data=ipfs_hash
        ).transact(transaction_options)
        return Web3.toHex(tx_hash)
    def create_request_as_payer(self, id_addresses, amounts,
                                payer_refund_address, payer_id_address,
                                payment_addresses=None,
                                data=None,
                                creation_payments=None, additional_payments=None):
        """ Create a Request on-chain, initiated (and optionally part-paid) by the payer.
        :param id_addresses: Ethereum addresses identifying the payees
        :param amounts: expected payment amounts, one per payee
        :param payment_addresses: optional payment addresses; missing entries
            default to the zero address
        :param payer_refund_address: address to which refunds are sent
        :param payer_id_address: Ethereum address identifying the payer
        :param data: optional extra data, stored on IPFS
        :param creation_payments: Amount to pay when Request is created
        :param additional_payments: Additional amount to pay each payee, on top of expected amount
        :param options:
        :return: hex-encoded transaction hash
        """
        if payment_addresses:
            payment_addresses = [
                Web3.toChecksumAddress(a) if a else EMPTY_BYTES_20 for a in payment_addresses
            ]
        else:
            payment_addresses = [EMPTY_BYTES_20] * len(id_addresses)
        creation_payments = creation_payments if creation_payments else []
        additional_payments = additional_payments if additional_payments else []
        ipfs_hash = store_ipfs_data(data) if data else ''
        # TODO better validation, more DRY
        # Validate Request parameters
        if len(id_addresses) != len(amounts):
            raise InvalidRequestParameters(
                'payees and amounts must be the same size'
            )
        if payment_addresses and len(id_addresses) < len(payment_addresses):
            raise InvalidRequestParameters(
                'payees can not be larger than payee_payment_addresses'
            )
        if creation_payments and len(id_addresses) < len(creation_payments):
            raise InvalidRequestParameters(
                'payees can not be larger than creation_payments'
            )
        if additional_payments and len(id_addresses) < len(additional_payments):
            raise InvalidRequestParameters(
                'payees can not be larger than additional_payments'
            )
        for amount in amounts:
            if int(amount) < 0:
                raise InvalidRequestParameters(
                    'amounts must be positive integers'
                )
        for amount in creation_payments:
            if int(amount) < 0:
                raise InvalidRequestParameters(
                    'amounts must be positive integers'
                )
        for address in id_addresses + payment_addresses:
            if not Web3.isAddress(address):
                raise InvalidRequestParameters(
                    '{} is not a valid Ethereum address'.format(address)
                )
        if payer_id_address == id_addresses[0]:
            raise InvalidRequestParameters(
                'Payer can not be the main payee'
            )
        # call fee estimator, set as value for tx
        currency_contract_data = self.get_currency_contract_data()
        currency_contract = currency_contract_data['instance']
        estimated_value = currency_contract.functions.collectEstimation(
            _expectedAmount=sum(a for a in amounts)
        ).call()
        # NOTE(review): gasPrice and gas are hard-coded here; confirm these
        # values are intentional rather than leftover test settings.
        transaction_options = {
            'from': payer_id_address,
            'value': estimated_value,
            'nonce': w3.eth.getTransactionCount(payer_id_address, 'pending'),
            'gasPrice': 5700000000,
            'gas': 176587
        }
        # NOTE(review): payment_addresses is validated above but never passed
        # to createRequestAsPayer — verify against the contract ABI whether a
        # _payeesPaymentAddress argument is expected here.
        tx = currency_contract.functions.createRequestAsPayer(
            _payeesIdAddress=id_addresses,
            _expectedAmounts=amounts,
            _payerRefundAddress=payer_refund_address,
            _payeeAmounts=creation_payments,
            _additionals=additional_payments,
            _data=ipfs_hash
        ).buildTransaction(transaction_options)
        # Sign locally and broadcast the raw transaction.
        signed_tx = environment_variable_transaction_signer(
            tx=tx,
            address=payer_id_address
        )
        tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)
        return Web3.toHex(tx_hash)
    def create_signed_request(self, currency_contract_address, id_addresses, amounts,
                              payment_addresses, expiration_date,
                              data=None):
        """ Create a Signed Request.
        :param currency_contract_address: Address of the currency contract for this
            Request's currency
        :param id_addresses: List of Ethereum addresses identifying the recipients
        :type id_addresses: [str]
        :param amounts: List of payment amounts
        :type amounts: [int]
        :param payment_addresses: List of Ethereum addresses to which funds will be paid
        :type payment_addresses: [str]
        :param expiration_date: Unix timestamp after which Request can no longer be broadcast
        :param data: Additional data to store with the Request
        :return: an (off-chain) signed Request object
        """
        # If we have data, store it on IPFS
        ipfs_hash = store_ipfs_data(data) if data else ''
        request_hash = hash_request(
            currency_contract_address=currency_contract_address,
            id_addresses=id_addresses,
            amounts=amounts,
            payer=None,
            expiration_date=expiration_date,
            payment_addresses=payment_addresses,
            ipfs_hash=ipfs_hash)
        # `defunct_hash_message` is used to maintain compatibility with `web3Single.sign()`
        message_hash = defunct_hash_message(hexstr=request_hash)
        # TODO make signing strategy configurable
        # TODO signer should accept an optional dict describing the Request attributes so
        # it can enforce controls (rate-limiting, max Request amount, etc.)
        signer_function = environment_variable_hash_signer
        # The main payee (first id address) signs the Request hash.
        signed_message = signer_function(
            message_hash=message_hash,
            address=id_addresses[0]
        )
        # Combine id_addresses/payment_addresses/amounts into a list of Payees
        payees = []
        for id_address, payment_address, amount in zip(id_addresses, payment_addresses, amounts):
            payees.append(Payee(
                id_address=id_address,
                payment_address=payment_address,
                amount=amount))
        request = Request(
            payees=payees,
            _hash=request_hash,
            currency_contract_address=currency_contract_address,
            ipfs_hash=ipfs_hash,
            data=data,
            payer=None,
            expiration_date=expiration_date,
            signature=Web3.toHex(signed_message.signature)
        )
        return request
    def sign_request_as_payee(self, id_addresses, amounts,
                              payment_addresses, expiration_date,
                              data=None):
        """ Sign a Request as the payee.
        :param id_addresses: List of Ethereum addresses identifying the recipients
        :param amounts: List of payment amounts
        :param expiration_date: Unix timestamp after which Request can no longer be broadcast
        :param payment_addresses: List of payment addresses (None entries map to 0x0)
        :param data: Additional data to store with the Request
        :return: a signed Request object
        """
        # Iterate through payee addresses - if a None value is given for any address,
        # replace it with the 0x0 address (padded to 20 bytes).
        parsed_payee_payment_addresses = [
            Web3.toChecksumAddress(a) if a else EMPTY_BYTES_20 for a in payment_addresses
        ]
        id_addresses = [
            Web3.toChecksumAddress(a) for a in id_addresses
        ]
        # Validate Request parameters
        if len(id_addresses) != len(amounts):
            raise InvalidRequestParameters(
                'payees and amounts must be the same size'
            )
        if payment_addresses and len(id_addresses) < len(payment_addresses):
            raise InvalidRequestParameters(
                'payees can not be larger than payee_payment_addresses'
            )
        if int(expiration_date) <= int(time.time()):
            raise InvalidRequestParameters(
                'expiration_date must be in the future'
            )
        for amount in amounts:
            if int(amount) < 0:
                raise InvalidRequestParameters(
                    'amounts must be positive integers'
                )
        for address in id_addresses:
            if not Web3.isAddress(address):
                raise InvalidRequestParameters(
                    '{} is not a valid Ethereum address'.format(address)
                )
        currency_contract_data = self.get_currency_contract_data()
        return self.create_signed_request(
            currency_contract_address=currency_contract_data['address'],
            id_addresses=id_addresses,
            amounts=amounts,
            payment_addresses=parsed_payee_payment_addresses,
            expiration_date=expiration_date,
            data=data
        )
import grequests
def extract_retry(request):
    """Return how many times *request* has already been retried.

    Reads the ``retries`` key from the request dict (not ``retry`` as
    previously documented), defaulting to 0 when absent or falsy.

    Args:
        request(dict): required to have
            - method(string): http method to use
            - url(string): url of the requested resource
            - kwargs(dict): defines more specs for the request
                - data(dict): if request has json body
                - headers(dict): if you want to attach any headers to the request

    Returns:
        int: how many times this request was retried (0 if never).
    """
    # A missing key and a falsy value (None/0) both mean "never retried",
    # matching the original if/else behaviour in a single expression.
    return request.get('retries') or 0
def index_requests(reqs):
    """Stamp each request dict with its position in the input list.

    Args:
        reqs (list): request dicts (see ``send_requests`` for the expected
            shape of each entry).

    Returns:
        list: the same list, with an ``'index'`` key set on every entry to
        its original position.
    """
    for position, entry in enumerate(reqs):
        entry['index'] = position
    return reqs
def send_requests(requests_set, aperature, max_retry):
    """Send *requests_set* concurrently in windows of *aperature* requests.

    A request that gets no response, or a response with status >= 400, is
    re-queued until it has been retried *max_retry* times; after that its
    final (possibly failed) response is recorded anyway.

    Args:
        requests_set (list): Array of requests to send:
            request(dict): required to have
                - method(string): http method to use
                - url(string): url of the requested resource
                - kwargs(dict): defines more specs for the request
                    - data(dict): if request has json body
                    - headers(dict): if you want to attach any headers to the request
        aperature (int): Max number of concurrent requests to be sent.
        max_retry (int): Max number of retries for failed requests.

    Returns:
        list: response dicts ({'response': ..., 'index': ...}), sorted by
        the order the requests were originally supplied in.
    """
    responses = []
    requests_set = index_requests(requests_set)
    while len(requests_set):
        staged_set = requests_set[:aperature]  # slice current window
        requests_set = requests_set[aperature:]  # slice out executed requests from requests_set
        # Build grequests objects; guard the kwargs lookup so that a missing
        # or None 'kwargs' entry does not blow up on **-expansion.
        active_set = (grequests.request(r.get('method'), r.get('url'), **(r.get('kwargs') or {}))
                      for r in staged_set)  # requests' objects formation
        response_set = grequests.map(active_set)  # concurrent execution
        for idx, response in enumerate(response_set):
            request = staged_set[idx]
            if response and response.status_code < 400:
                responses.append({'response': response, 'index': request.get('index')})
            else:
                retry = extract_retry(request)
                if retry < max_retry:
                    # Re-queue for another attempt at the back of the queue.
                    request['retries'] = retry + 1
                    requests_set.append(request)
                else:
                    # Retries exhausted: record the failed response as-is.
                    responses.append({'response': response, 'index': request.get('index')})
    return sorted(responses, key=lambda r: r.get('index'))
from __future__ import annotations
import datetime
import logging
from typing import Any, Optional
from django.conf import settings
from django.contrib.auth import login
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.http import HttpRequest
from django.http.response import HttpResponse
from django.utils.timezone import now as tz_now
from jwt.exceptions import InvalidAudienceError, InvalidTokenError
from .exceptions import MaxUseError
from .settings import JWT_SESSION_TOKEN_EXPIRY, LOG_TOKEN_ERRORS
from .utils import encode, to_seconds
logger = logging.getLogger(__name__)
class RequestTokenQuerySet(models.query.QuerySet):
    """Custom QuerySet for RequestToken objects."""

    def create_token(self, scope: str, **kwargs: Any) -> RequestToken:
        """Build, save and return a new RequestToken for the given scope."""
        token = RequestToken(scope=scope, **kwargs)
        return token.save()
class RequestToken(models.Model):
    """
    A link token, targeted for use by a known Django User.
    A RequestToken contains information that can be encoded as a JWT
    (JSON Web Token). It is designed to be used in conjunction with the
    RequestTokenMiddleware (responsible for JWT verification) and the
    @use_request_token decorator (responsible for validating the token
    and setting the request.user correctly).
    Each token must have a 'scope', which is used to tie it to a view function
    that is decorated with the `use_request_token` decorator. The token can
    only be used by functions with matching scopes.
    The token may be set to a specific User, in which case, if the existing
    request is unauthenticated, it will use that user as the `request.user`
    property, allowing access to authenticated views.
    The token may be timebound by the `not_before_time` and `expiration_time`
    properties, which are registered JWT 'claims'.
    The token may be restricted by the number of times it can be used, through
    the `max_use` property, which is incremented each time it's used (NB *not*
    thread-safe).
    The token may also store arbitrary serializable data, which can be used
    by the view function if the request token is valid.
    JWT spec: https://tools.ietf.org/html/rfc7519
    """

    # do not login the user on the request
    LOGIN_MODE_NONE = "None"
    # login the user, but only for the original request
    LOGIN_MODE_REQUEST = "Request"
    # login the user fully, but only for single-use short-duration links
    LOGIN_MODE_SESSION = "Session"
    LOGIN_MODE_CHOICES = (
        (LOGIN_MODE_NONE, "Do not authenticate"),
        (LOGIN_MODE_REQUEST, "Authenticate a single request"),
        (LOGIN_MODE_SESSION, "Authenticate for the entire session"),
    )
    login_mode = models.CharField(
        max_length=10,
        default=LOGIN_MODE_NONE,
        choices=LOGIN_MODE_CHOICES,
        help_text="How should the request be authenticated?",
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="request_tokens",
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        help_text="Intended recipient of the JWT (can be used by anyone if not set).",
    )
    scope = models.CharField(
        max_length=100,
        help_text="Label used to match request to view function in decorator.",
    )
    expiration_time = models.DateTimeField(
        blank=True,
        null=True,
        help_text="Token will expire at this time (raises ExpiredSignatureError).",
    )
    not_before_time = models.DateTimeField(
        blank=True,
        null=True,
        help_text=(
            "Token cannot be used before this time (raises ImmatureSignatureError)."
        ),
    )
    data = JSONField(
        help_text=(
            "Custom data add to the token, but not encoded (must be fetched from DB)."
        ),
        blank=True,
        null=True,
        default=dict,
    )
    issued_at = models.DateTimeField(
        blank=True,
        null=True,
        help_text="Time the token was created (set in the initial save).",
    )
    max_uses = models.IntegerField(
        default=1, help_text="The maximum number of times the token can be used."
    )
    used_to_date = models.IntegerField(
        default=0,
        help_text=(
            "Number of times the token has been used to date (raises MaxUseError)."
        ),
    )
    objects = RequestTokenQuerySet.as_manager()

    class Meta:
        verbose_name = "Token"
        verbose_name_plural = "Tokens"

    def __str__(self) -> str:
        return "Request token #%s" % (self.id)

    def __repr__(self) -> str:
        return "<RequestToken id=%s scope=%s login_mode='%s'>" % (
            self.id,
            self.scope,
            self.login_mode,
        )

    @property
    def aud(self) -> Optional[int]:
        """Return 'aud' claim, mapped to user.id."""
        return self.claims.get("aud")

    @property
    def exp(self) -> Optional[datetime.datetime]:
        """Return 'exp' claim, mapped to expiration_time."""
        return self.claims.get("exp")

    @property
    def nbf(self) -> Optional[datetime.datetime]:
        """Return the 'nbf' claim, mapped to not_before_time."""
        return self.claims.get("nbf")

    @property
    def iat(self) -> Optional[datetime.datetime]:
        """Return the 'iat' claim, mapped to issued_at."""
        return self.claims.get("iat")

    @property
    def jti(self) -> Optional[int]:
        """Return the 'jti' claim, mapped to id."""
        return self.claims.get("jti")

    @property
    def max(self) -> int:
        """Return the 'max' claim, mapped to max_uses."""
        return self.claims["max"]

    @property
    def sub(self) -> str:
        """Return the 'sub' claim, mapped to scope."""
        return self.claims["sub"]

    @property
    def claims(self) -> dict:
        """Return dict containing all of the DEFAULT_CLAIMS (where values exist)."""
        claims = {
            "max": self.max_uses,
            "sub": self.scope,
            "mod": self.login_mode[:1].lower(),
        }
        # Optional claims are only included when they have a value.
        if self.id is not None:
            claims["jti"] = self.id
        if self.user is not None:
            claims["aud"] = self.user.id
        if self.expiration_time is not None:
            claims["exp"] = to_seconds(self.expiration_time)
        if self.issued_at is not None:
            claims["iat"] = to_seconds(self.issued_at)
        if self.not_before_time is not None:
            claims["nbf"] = to_seconds(self.not_before_time)
        return claims

    def clean(self) -> None:
        """Ensure that login_mode setting is valid."""
        if self.login_mode == RequestToken.LOGIN_MODE_NONE:
            pass
        if self.login_mode == RequestToken.LOGIN_MODE_SESSION:
            if self.user is None:
                raise ValidationError({"user": "Session token must have a user."})
            if self.max_uses != 1:
                raise ValidationError(
                    {"max_uses": "Session token must have max_use of 1."}
                )
            if self.expiration_time is None:
                raise ValidationError(
                    {"expiration_time": "Session token must have an expiration_time."}
                )
        if self.login_mode == RequestToken.LOGIN_MODE_REQUEST:
            if self.user is None:
                # Fix: this error is about the missing user, so it must be
                # keyed on "user" (was incorrectly keyed "expiration_time").
                raise ValidationError({"user": "Request token must have a user."})

    def save(self, *args: Any, **kwargs: Any) -> RequestToken:
        if "update_fields" not in kwargs:
            self.issued_at = self.issued_at or tz_now()
            # Session tokens are forced to expire if no explicit expiry is set.
            if self.login_mode == RequestToken.LOGIN_MODE_SESSION:
                self.expiration_time = self.expiration_time or (
                    self.issued_at
                    + datetime.timedelta(minutes=JWT_SESSION_TOKEN_EXPIRY)
                )
        self.clean()
        super(RequestToken, self).save(*args, **kwargs)
        return self

    def jwt(self) -> str:
        """Encode the token claims into a JWT."""
        return encode(self.claims).decode()

    def validate_max_uses(self) -> None:
        """
        Check the token max_uses is still valid.
        Raises MaxUseError if invalid.
        """
        if self.used_to_date >= self.max_uses:
            raise MaxUseError("RequestToken [%s] has exceeded max uses" % self.id)

    def _auth_is_anonymous(self, request: HttpRequest) -> HttpRequest:
        """Authenticate anonymous requests."""
        if request.user.is_authenticated:
            raise InvalidAudienceError("Token requires anonymous user.")
        if self.login_mode == RequestToken.LOGIN_MODE_NONE:
            pass
        if self.login_mode == RequestToken.LOGIN_MODE_REQUEST:
            logger.debug(
                "Setting request.user to %r from token %i.", self.user, self.id
            )
            request.user = self.user
        if self.login_mode == RequestToken.LOGIN_MODE_SESSION:
            logger.debug(
                "Authenticating request.user as %r from token %i.", self.user, self.id
            )
            # I _think_ we can get away with this as we are pulling the
            # user out of the DB, and we are explicitly authenticating
            # the user.
            self.user.backend = "django.contrib.auth.backends.ModelBackend"
            login(request, self.user)
        return request

    def _auth_is_authenticated(self, request: HttpRequest) -> HttpRequest:
        """Authenticate requests with existing users."""
        if request.user.is_anonymous:
            raise InvalidAudienceError("Token requires authenticated user.")
        if self.login_mode == RequestToken.LOGIN_MODE_NONE:
            return request
        if request.user == self.user:
            return request
        raise InvalidAudienceError(
            "RequestToken [%i] audience mismatch: '%s' != '%s'"
            % (self.id, request.user, self.user)
        )

    def authenticate(self, request: HttpRequest) -> HttpRequest:
        """
        Authenticate an HttpRequest with the token user.
        This method encapsulates the request handling - if the token
        has a user assigned, then this will be added to the request.
        """
        if request.user.is_anonymous:
            return self._auth_is_anonymous(request)
        else:
            return self._auth_is_authenticated(request)

    @transaction.atomic
    def log(
        self,
        request: HttpRequest,
        response: HttpResponse,
        error: Optional[InvalidTokenError] = None,
    ) -> RequestTokenLog:
        """
        Record the use of a token.
        This is used by the decorator to log each time someone uses the token,
        or tries to. Used for reporting, diagnostics.
        Args:
            request: the HttpRequest object that used the token, from which the
                user, ip and user-agenct are extracted.
            response: the corresponding HttpResponse object, from which the status
                code is extracted.
            error: an InvalidTokenError that gets logged as a RequestTokenError.
        Returns a RequestTokenUse object.
        """

        def rmg(key: str, default: Any = None) -> Any:
            return request.META.get(key, default)

        log = RequestTokenLog(
            token=self,
            user=None if request.user.is_anonymous else request.user,
            user_agent=rmg("HTTP_USER_AGENT", "unknown"),
            client_ip=parse_xff(rmg("HTTP_X_FORWARDED_FOR"))
            or rmg("REMOTE_ADDR", None),
            status_code=response.status_code,
        ).save()
        if error and LOG_TOKEN_ERRORS:
            RequestTokenErrorLog.objects.create_error_log(log, error)
        # NB this will include all error logs - which means that an error log
        # may prohibit further use of the token. Is there a scenario in which
        # this would be the wrong outcome?
        self.used_to_date = self.logs.filter(error__isnull=True).count()
        self.save()
        return log

    def expire(self) -> None:
        """Mark the token as expired immediately, effectively killing the token."""
        self.expiration_time = tz_now() - datetime.timedelta(microseconds=1)
        self.save()
def parse_xff(header_value: str) -> Optional[str]:
    """
    Parse out the X-Forwarded-For request header.
    This handles the bug that blows up when multiple IP addresses are
    specified in the header. The docs state that the header contains
    "The originating IP address", but in reality it contains a list
    of all the intermediate addresses. The first item is the original
    client, and then any intermediate proxy IPs. We want the original.
    Returns the first IP in the list, else None.
    """
    try:
        # partition() keeps only the text before the first comma, which is
        # the originating client IP.
        first_ip, _, _ = header_value.partition(",")
    except AttributeError:
        # header_value was None (or some other non-string) - nothing to parse.
        return None
    return first_ip.strip()
class RequestTokenLog(models.Model):
    """Used to log the use of a RequestToken."""

    token = models.ForeignKey(
        RequestToken,
        related_name="logs",
        help_text="The RequestToken that was used.",
        on_delete=models.CASCADE,
        db_index=True,
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        help_text="The user who made the request (None if anonymous).",
    )
    user_agent = models.TextField(
        blank=True, help_text="User-agent of client used to make the request."
    )
    client_ip = models.GenericIPAddressField(
        blank=True,
        null=True,
        unpack_ipv4=True,
        help_text="Client IP of device used to make the request.",
    )
    status_code = models.IntegerField(
        blank=True,
        null=True,
        help_text="Response status code associated with this use of the token.",
    )
    timestamp = models.DateTimeField(
        blank=True, help_text="Time the request was logged."
    )

    class Meta:
        verbose_name = "Log"
        verbose_name_plural = "Logs"

    def __str__(self) -> str:
        if self.user is None:
            return "%s used %s" % (self.token, self.timestamp)
        else:
            return "%s used by %s at %s" % (self.token, self.user, self.timestamp)

    def __repr__(self) -> str:
        return "<RequestTokenLog id=%s token=%s timestamp='%s'>" % (
            self.id,
            self.token.id,
            self.timestamp,
        )

    # NOTE: return annotation corrected - save() returns self, which is a
    # RequestTokenLog (previously mis-annotated as RequestToken).
    def save(self, *args: Any, **kwargs: Any) -> RequestTokenLog:
        # Default the timestamp on first save; partial updates leave it alone.
        if "update_fields" not in kwargs:
            self.timestamp = self.timestamp or tz_now()
        super(RequestTokenLog, self).save(*args, **kwargs)
        return self
class RequestTokenErrorLogQuerySet(models.query.QuerySet):
    """Custom QuerySet for RequestTokenErrorLog objects."""

    def create_error_log(
        self, log: RequestTokenLog, error: Exception
    ) -> RequestTokenErrorLog:
        """Create, save and return an error log entry for a token use.

        Fix: the instance was previously constructed but never persisted,
        so callers (e.g. RequestToken.log) silently lost the error record.
        """
        return RequestTokenErrorLog(
            token=log.token,
            log=log,
            error_type=type(error).__name__,
            error_message=str(error),
        ).save()
class RequestTokenErrorLog(models.Model):
    """Used to log errors that occur with the use of a RequestToken."""

    token = models.ForeignKey(
        RequestToken,
        related_name="errors",
        on_delete=models.CASCADE,
        help_text="The RequestToken that was used.",
        db_index=True,
    )
    log = models.OneToOneField(
        RequestTokenLog,
        related_name="error",
        on_delete=models.CASCADE,
        help_text="The token use against which the error occurred.",
        db_index=True,
    )
    error_type = models.CharField(
        max_length=50, help_text="The underlying type of error raised."
    )
    error_message = models.CharField(
        max_length=200, help_text="The error message supplied."
    )
    # Consistency fix: call as_manager() on the class, matching how
    # RequestToken binds RequestTokenQuerySet.as_manager() (previously this
    # instantiated the queryset first).
    objects = RequestTokenErrorLogQuerySet.as_manager()

    class Meta:
        verbose_name = "Error"
        verbose_name_plural = "Errors"

    def __str__(self) -> str:
        return self.error_message

    def save(self, *args: Any, **kwargs: Any) -> RequestTokenErrorLog:
        super(RequestTokenErrorLog, self).save(*args, **kwargs)
        return self
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the RequestToken and RequestTokenLog tables."""

    # Depends on whichever user model the project has configured.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]

    operations = [
        # JWT-style link token, optionally tied to a single user.
        migrations.CreateModel(
            name="RequestToken",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "target_url",
                    models.CharField(help_text="The target endpoint.", max_length=200),
                ),
                (
                    "expiration_time",
                    models.DateTimeField(
                        help_text="DateTime at which this token expires.",
                        null=True,
                        blank=True,
                    ),
                ),
                (
                    "not_before_time",
                    models.DateTimeField(
                        help_text="DateTime before which this token is invalid.",
                        null=True,
                        blank=True,
                    ),
                ),
                (
                    "data",
                    models.TextField(
                        help_text="Custom data (JSON) added to the default payload.",
                        max_length=1000,
                        blank=True,
                    ),
                ),
                (
                    "issued_at",
                    models.DateTimeField(
                        help_text="Time the token was created, set in the initial save.",
                        null=True,
                        blank=True,
                    ),
                ),
                (
                    "max_uses",
                    models.IntegerField(
                        default=1,
                        help_text="Cap on the number of times the token can be used, defaults to 1 (single use).",
                    ),
                ),
                (
                    "used_to_date",
                    models.IntegerField(
                        default=0,
                        help_text="Denormalised count of the number times the token has been used.",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        to=settings.AUTH_USER_MODEL,
                        help_text="Intended recipient of the JWT.",
                        null=True,
                        on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
        ),
        # Audit log of each use (or attempted use) of a RequestToken.
        migrations.CreateModel(
            name="RequestTokenLog",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "user_agent",
                    models.TextField(
                        help_text="User-agent of client used to make the request.",
                        blank=True,
                    ),
                ),
                (
                    "client_ip",
                    models.CharField(
                        help_text="Client IP of device used to make the request.",
                        max_length=15,
                    ),
                ),
                (
                    "timestamp",
                    models.DateTimeField(help_text="Time the request was logged."),
                ),
                (
                    "token",
                    models.ForeignKey(
                        help_text="The RequestToken that was used.",
                        to="request_token.RequestToken",
                        on_delete=models.deletion.CASCADE,
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        to=settings.AUTH_USER_MODEL,
                        help_text="The user who made the request (None if anonymous).",
                        null=True,
                        on_delete=models.deletion.CASCADE,
                    ),
                ),
            ],
        ),
    ]
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the 'scope' field and refreshes help_text on existing fields."""

    dependencies = [("request_token", "0002_auto_20151227_1428")]

    operations = [
        # New field: ties a token to the decorated view's scope label.
        migrations.AddField(
            model_name="requesttoken",
            name="scope",
            field=models.CharField(
                default="",
                help_text="Label used to match request to view function in decorator.",
                max_length=100,
            ),
            preserve_default=False,
        ),
        # The remaining operations only update help_text/defaults to match
        # the current model definitions (no schema change).
        migrations.AlterField(
            model_name="requesttoken",
            name="expiration_time",
            field=models.DateTimeField(
                help_text="Token will expire at this time (raises ExpiredSignatureError).",
                null=True,
                blank=True,
            ),
        ),
        migrations.AlterField(
            model_name="requesttoken",
            name="issued_at",
            field=models.DateTimeField(
                help_text="Time the token was created (set in the initial save).",
                null=True,
                blank=True,
            ),
        ),
        migrations.AlterField(
            model_name="requesttoken",
            name="max_uses",
            field=models.IntegerField(
                default=1,
                help_text="The maximum number of times the token can be used.",
            ),
        ),
        migrations.AlterField(
            model_name="requesttoken",
            name="not_before_time",
            field=models.DateTimeField(
                help_text="Token cannot be used before this time (raises ImmatureSignatureError).",
                null=True,
                blank=True,
            ),
        ),
        migrations.AlterField(
            model_name="requesttoken",
            name="used_to_date",
            field=models.IntegerField(
                default=0,
                help_text="Number of times the token has been used to date (raises MaxUseError).",
            ),
        ),
        migrations.AlterField(
            model_name="requesttoken",
            name="user",
            field=models.ForeignKey(
                blank=True,
                to=settings.AUTH_USER_MODEL,
                help_text="Intended recipient of the JWT (can be used by anyone if not set).",
                null=True,
                on_delete=models.deletion.CASCADE,
            ),
        ),
    ]
from functools import wraps
from flask import request as req
def validate_headers(header_param):
    """Decorator factory that ensures required headers exist on the request.

    Each key in *header_param* must be present in the incoming request's
    headers and carry a non-empty value; otherwise an error payload is
    returned instead of calling the wrapped view.

    Args:
        header_param (dict): the header keys that must be present.

    Returns:
        A decorator that performs the header validation.
    """
    def decorator(function):
        """Wrap *function* with the header-presence check.

        Args:
            function (func): the view function to decorate.

        Returns:
            function (func): the wrapped function.
        """
        @wraps(function)
        def check_argument(*args, **kw):
            """Run the header check before delegating to the view.

            Args:
                *args (list): positional arguments forwarded to the view.
                **kw (dict): keyword arguments forwarded to the view.

            Returns:
                The error payload if any required header is missing or
                empty, otherwise the wrapped view's result.
            """
            incoming = req.headers
            has_missing = any(
                required not in incoming or not incoming.get(required)
                for required in header_param
            )
            if has_missing:
                return {"error": "error"}
            return function(*args, **kw)
        return check_argument
    return decorator
def validate_arguments(param):
    """Build a decorator that rejects requests missing JSON body keys.

    The request body must contain a ``req_param`` object holding every
    key listed in *param*.

    Args:
        param (iterable): Keys required inside the ``req_param`` object.

    Returns:
        A decorator; the wrapped view returns ``{"error": "error"}``
        when the body or any required key is missing.
    """
    def decorator(function):
        """Wrap *function* with the JSON argument check."""
        @wraps(function)
        def check_argument(*args, **kw):
            """Validate the JSON payload before invoking the view."""
            body = req.get_json()
            # The whole payload must live under the "req_param" key.
            if "req_param" not in body:
                return {"error": "error"}
            payload = body["req_param"]
            for required_key in param:
                if required_key not in payload:
                    return {"error": "error"}
            return function(*args, **kw)
        return check_argument
    return decorator
import copy
from .fields import Field
class BaseSerializer(object):
    """State shared by single- and list-flavoured serializers.

    Subclasses are expected to replace ``_errors`` / ``_validated_data``
    with concrete containers and populate them during validation.
    """

    def __init__(self, data=None, source=None, required=True, force_valid=False, allow_null=False):
        self._initial_data = data
        self._source = source
        self._required = required
        self._force_valid = force_valid
        self._allow_null = allow_null
        # Filled in by subclasses / is_valid(); None until then.
        self._errors = None
        self._validated_data = None

    def get_errors(self):
        """Return the collected validation errors."""
        return self._errors

    def validate_data(self):
        """Return the data that passed validation."""
        return self._validated_data

    def has_error(self):
        """Return True when at least one error has been recorded."""
        return len(self._errors) > 0

    @property
    def errors(self):
        """Read-only alias of :meth:`get_errors`."""
        return self.get_errors()
class SingleSerializer(BaseSerializer):
    """Validates one dict of input against fields declared on the subclass.

    Field / nested-serializer attributes declared on subclasses are harvested
    once per class by ``fields()`` / ``_get_fields()`` (and removed from the
    class via ``delattr`` so they do not shadow data afterwards).
    """
    def __init__(self, *args, **kwargs):
        super(SingleSerializer, self).__init__(*args, **kwargs)
        self._validated_data = {}
        self._errors = {}
        self._default = {}
        # Flips to False as soon as any field reports an error.
        self._all_fields_valid = True
    def validate_data(self):
        # With force_valid=True any recorded error voids the result entirely;
        # otherwise partial results are returned as long as all fields parsed.
        if not (self._force_valid and self.has_error()) and self._all_fields_valid:
            return self._validated_data
        return {}
    @classmethod
    def fields(cls):
        """Return the {name: field} mapping, building and caching it per class."""
        # Check cls.__dict__ (not hasattr) so each subclass gets its own cache
        # instead of inheriting the parent's.
        if "_fields_dict" not in cls.__dict__:
            cls._fields_dict = {}
            for field in cls._get_fields():
                if field in cls.__dict__:
                    cls._fields_dict[field] = getattr(cls, field)
                # Strip the descriptor off the class so attribute access on
                # instances is not shadowed by the field object.
                if hasattr(cls, field):
                    delattr(cls, field)
            # Merge fields declared on ancestor serializers.
            if len(cls._get_classes()) > 1:
                cls._fields_dict.update(cls._get_parent().fields())
        return cls._fields_dict
    @classmethod
    def _get_fields(cls):
        """Return (and cache) the names of Field/serializer attributes declared here."""
        if '_fields' not in cls.__dict__:
            cls._fields = []
            # Only attributes introduced by this class, not by its parent.
            for field in set(dir(cls)) - set(dir(cls._get_parent())):
                if not isinstance(getattr(cls, field), (Field, BaseSerializer)):
                    continue
                cls._fields.append(field)
            if len(cls._get_classes()) > 1:
                cls._fields = cls._fields + cls._get_parent()._get_fields()
        return cls._fields
    @classmethod
    def _get_classes(cls):
        """Return (and cache) the single-inheritance chain up to Serializer."""
        if "_base_classes" not in cls.__dict__:
            the_class = cls
            cls._base_classes = []
            while True:
                bases = the_class.__bases__
                if len(bases) == 0:
                    break
                # The harvesting logic only supports single inheritance.
                assert len(bases) == 1, """ can not use multiple extend"""
                the_class = bases[0]
                cls._base_classes.append(the_class)
                if the_class == Serializer:
                    break
        return cls._base_classes
    @classmethod
    def _get_parent(cls):
        """Return the immediate base class (first entry of the chain)."""
        return cls._get_classes()[0]
    def _get_field(self, key):
        # Deep-copy so per-instance validation state never leaks into the
        # class-level field cache.
        return copy.deepcopy(getattr(self, '_fields_dict')[key])
    @property
    def data(self):
        """Validated data padded with each missing field's default."""
        data = self.validate_data()
        for key, value in list(self.fields().items()):
            if key not in data:
                data[key] = value._default
        return data
    def add_error(self, index, value):
        """Record *value* as the error for field *index*."""
        self._errors[index] = value
    def is_valid(self):
        """Run validation; return True when no error was recorded."""
        validated_data, serializer_validated = self._validate(self._initial_data)
        self._validated_data = validated_data
        return not self.has_error()
    def _validate(self, initial_data):
        """Validate *initial_data*; return (validated_dict, all_ok_flag)."""
        if initial_data is None and self._allow_null:
            return None, True
        serializer_validated = True
        validate_data = {}
        # User-level validate() hook runs first and may rewrite the data.
        errors, initial_data = self._check_user_validation(initial_data)
        if len(errors) != 0:
            self._all_fields_valid = False
            for error in errors:
                for key, value in error.items():
                    self.add_error(key, value)
        for attr in self.fields():
            field = self._get_field(attr)
            if isinstance(field, Field):
                field.set_data(initial_data, attr)
                # Errors from set_data (e.g. required-but-missing) are fatal
                # for the field; skip rule validation.
                if field.has_error():
                    self._all_fields_valid = False
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                if not field.validate():
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                validate_data[attr] = field.data
            elif isinstance(field, Serializer):
                field.set_initial_data(initial_data, attr)
                if field.has_error():
                    self._all_fields_valid = False
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                elif not field.is_valid():
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                # NOTE: the nested serializer's (possibly partial) result is
                # stored even when is_valid() returned False.
                validate_data[attr] = field.validate_data()
                continue
            elif isinstance(field, ListSerializer):
                field.set_initial_data(initial_data, attr)
                if field.has_error():
                    self._all_fields_valid = False
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                elif not field.is_valid():
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                validate_data[attr] = field.validate_data()
                continue
        return validate_data, serializer_validated
    def _check_user_validation(self, data):
        """Run the user validate() hook; return (error_list, possibly-rewritten data)."""
        try:
            before_validation = self.validate(data)
            return [], before_validation
        except ValidationError as e:
            return e.details, data
    def validate(self, attr):
        """Hook for subclasses: raise ValidationError or return (rewritten) data."""
        return attr
    @property
    def is_all_fields_valid(self):
        # True only when every field parsed without a hard error.
        return self._all_fields_valid
    def set_initial_data(self, data, index):
        """Extract this serializer's slice of *data* when nested under *index*."""
        self._initial_data = None
        if not data:
            return self
        if self._source is not None and self._source in data:
            self._initial_data = data[self._source]
        elif index in data:
            self._initial_data = data[index]
        else:
            if self._required:
                self.add_error(index, "This field is required")
        return self
class ListSerializer(BaseSerializer):
    """Validates a list of inputs by running *serializer* on each element."""
    def __init__(self, serializer, *args, **kwargs):
        # force_to_list: wrap a bare dict input into a one-element list.
        self._force_to_list = kwargs.pop("force_to_list", False)
        super(ListSerializer, self).__init__(*args, **kwargs)
        # "data" must not be forwarded to the per-element serializers;
        # each element supplies its own data below.
        kwargs.pop("data", False)
        self._serializer = serializer
        self._validated_data = []
        self._errors = []
        self._args = args
        self._kwargs = kwargs
        self._data = []
        self._default = []
        self._allow_null = True
    def add_error(self, value):
        """Append one element's error payload."""
        self._errors.append(value)
    def _can_null(self):
        # True when a missing/None payload is acceptable.
        return self._allow_null and self._initial_data is None
    def is_valid(self):
        """Validate every element; return True when no error was recorded."""
        assert isinstance(self._initial_data, (list, tuple)) or self._initial_data is None, \
            """ _initial_data must be list or tuple but get {data_type}""".format(
                data_type=type(self._initial_data).__name__)
        if self._initial_data is not None:
            for initial_data in self._initial_data:
                serializer = self._serializer(data=initial_data, *self._args, **self._kwargs)
                if serializer.is_valid():
                    self._validated_data.append(serializer.validate_data())
                    self._data.append(serializer.data)
                else:
                    self.add_error(serializer.get_errors())
                    # Without force_valid, partially valid elements still
                    # contribute their surviving data to the result.
                    if not self._force_valid and serializer.validate_data():
                        self._validated_data.append(serializer.validate_data())
                        self._data.append(serializer.data)
        else:
            self.add_error("can not be null !")
        return not self.has_error()
    @property
    def data(self):
        """Per-element ``data`` (validated values padded with defaults)."""
        return self._data
    def set_initial_data(self, data, index):
        """Extract this list's slice of *data* when nested under *index*."""
        self._initial_data = None
        if not data:
            return self
        if self._source is not None and self._source in data:
            self._initial_data = data[self._source]
        elif index in data:
            self._initial_data = data[index]
        else:
            if self._required:
                self.add_error("This field is required")
        # Promote a single dict to a one-element list when requested.
        if self._force_to_list and isinstance(self._initial_data, dict):
            self._initial_data = [self._initial_data]
        if self._allow_null and self._initial_data is None:
            self._initial_data = []
        return self
class Serializer(SingleSerializer):
    """Public serializer entry point.

    Constructing with ``many=True`` returns a ListSerializer (or the
    custom class declared as ``Meta.list_serializer``) wrapping this
    serializer class; otherwise a plain instance is built.
    """
    def __new__(cls, *args, **kwargs):
        wants_many = kwargs.pop("many", False)
        # Prime the per-class field cache before any instance exists.
        cls.fields()
        if not wants_many:
            instance = object.__new__(cls)
            instance.__init__(*args, **kwargs)
            return instance
        if hasattr(cls, "Meta") and hasattr(cls.Meta, "list_serializer"):
            return cls.Meta.list_serializer(cls, *args, **kwargs)
        return ListSerializer(cls, *args, **kwargs)
class ValidationError(Exception):
    """Raised by user validate() hooks to report one or more errors.

    ``details`` is normalised to a list of dicts; any non-dict entry is
    wrapped as ``{'non_field_error': str(entry)}``.
    """
    def __init__(self, details):
        if not isinstance(details, list):
            details = [details]
        self.details = [
            detail if isinstance(detail, dict)
            else {'non_field_error': str(detail)}
            for detail in details
        ]
import copy
from .validator import *
class Field(object):
    """Base class for a single validated input value.

    A Field extracts one value from an input mapping (``set_data``) and
    checks it against the rules registered via ``add_rule`` (``validate``).
    The extracted value is exposed through the ``data`` instance attribute;
    error messages accumulate in the list returned by ``get_errors``.

    Args:
        source: Alternative key to read from the input mapping instead of
            the declared attribute name.
        required: When True, a missing key (or empty input) records a
            "This field is required" error.
        many: When True, ``data`` must be a list/tuple and every element
            is validated against the registered rules.
        default: Value exposed as ``data`` when the key is absent.
        convert_before_validation: Optional callable applied to the raw
            value before any rule runs.
    """

    def __init__(self, source=None, required=False, many=False, default=None, convert_before_validation=None):
        self._source = source
        self._data = None
        # Extracted/validated value. Fixed: a dead ``def data(self)`` method
        # used to follow has_error(); it was always shadowed by this instance
        # attribute and unreachable, so it has been removed.
        self.data = None
        self._many = many
        self._errors = []
        self._rules = {}
        self._required = required
        self._default = default
        self._convert_before_validation = convert_before_validation
        assert self._convert_before_validation is None or \
            hasattr(convert_before_validation,
                    '__call__'), """convert_before_validation must be a closure function but get {}""".format(
            type(self._convert_before_validation))

    def set_data(self, data, index):
        """Pull the value for *index* (or the configured source) out of *data*.

        Resets any previous errors, falls back to the default when the key
        is missing, and applies ``convert_before_validation`` to non-None
        values. Returns ``self`` for chaining.
        """
        self.data = self._default
        self._errors = []
        self._data = data
        if not self._data:
            # Empty/None input mapping: nothing to extract.
            if self._required:
                self._errors.append("This field is required")
            return self
        if self._source is not None and self._source in self._data:
            self.data = self._data[self._source]
        elif index in self._data:
            self.data = self._data[index]
        else:
            if self._required:
                self._errors.append("This field is required")
        if self.data is not None and self._convert_before_validation is not None:
            self.data = self._convert_before_validation(self.data)
        return self

    def is_required(self):
        """Return True when the field must be present in the input."""
        return self._required

    def add_rule(self, rule, value=None):
        """Register a validation *rule* (a Validator constant) with an optional parameter."""
        self._rules[rule] = value

    def get_errors(self):
        """Return the list of error messages recorded so far."""
        return self._errors

    def has_error(self):
        """Return True when at least one error has been recorded."""
        return len(self._errors) != 0

    def validate(self):
        """Run every registered rule; return True when all pass."""
        if not self._many:
            for rule, value in self._rules.items():
                validator = Validator(self.data, rule, value)
                if validator.validate():
                    # Rules may normalise the value (e.g. date parsing),
                    # so the possibly-converted result is kept.
                    self.data = validator.data
                    continue
                self._errors.append(validator.get_message())
        else:
            assert isinstance(self.data, (list, tuple)), \
                """ data must be list or tuple but get {data_type}""".format(
                    data_type=type(self.data).__name__)
            # NOTE: in many-mode, per-element conversions from the validator
            # are not written back (matches original behaviour).
            for data in self.data:
                for rule, value in self._rules.items():
                    validator = Validator(data, rule, value)
                    if not validator.validate():
                        self._errors.append(validator.get_message())
        return not self.has_error()
class CharField(Field):
    """String field.

    Registers the string-type rule plus, depending on the constructor
    arguments, blank/null checks, length bounds and a choice whitelist.
    """

    def __init__(self, min_length=None, max_length=None, choices=None, allow_blank=False, allow_null=True, *args,
                 **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        self.add_rule(Validator.STRING)
        if not allow_blank:
            self.add_rule(Validator.NOT_BLANK)
        if not allow_null:
            self.add_rule(Validator.NOT_NULL)
        if min_length is not None:
            assert isinstance(min_length, int), """min_length must be integer"""
            self.add_rule(Validator.MIN_LEN, min_length)
        if max_length is not None:
            assert isinstance(max_length, int), """max_length must be integer"""
            self.add_rule(Validator.MAX_LEN, max_length)
        if choices is not None:
            assert isinstance(choices, (list, tuple)), """choices must be tuple or list"""
            self.add_rule(Validator.IN, choices)
class IntField(Field):
    """Integer field with optional value bounds and a choice whitelist."""

    def __init__(self, min_value=None, max_value=None, choices=None, allow_null=True, *args,
                 **kwargs):
        super(IntField, self).__init__(*args, **kwargs)
        self.add_rule(Validator.INT)
        if not allow_null:
            self.add_rule(Validator.NOT_NULL)
        if min_value is not None:
            assert isinstance(min_value, int), \
                """min_value must be integer"""
            self.add_rule(Validator.MIN_VALUE, min_value)
        if max_value is not None:
            # Fixed: previously registered Validator.MAX_LEN (copy-paste from
            # CharField), a length rule, instead of the value bound.
            # NOTE(review): assumes Validator defines MAX_VALUE to mirror the
            # MIN_VALUE rule used above — confirm against validator.py.
            assert isinstance(max_value, int), \
                """max_value must be integer"""
            self.add_rule(Validator.MAX_VALUE, max_value)
        if choices is not None:
            assert isinstance(choices, (list, tuple)), \
                """choices must be tuple or list"""
            for choice in choices:
                # Fixed: the message used type(choice).__name_ (typo), which
                # raised AttributeError instead of AssertionError on failure.
                assert isinstance(choice, int), \
                    """
                    choices must be list or tuple of integer but get {data_type}
                    """.format(data_type=type(choice).__name__)
            self.add_rule(Validator.IN, choices)
class IntegerField(IntField):
    """Alias of IntField kept so callers can use either name."""
    pass
class FloatField(Field):
    """Float field with optional value bounds and a choice whitelist."""

    def __init__(self, min_value=None, max_value=None, choices=None, allow_null=True, *args,
                 **kwargs):
        super(FloatField, self).__init__(*args, **kwargs)
        self.add_rule(Validator.FLOAT)
        if not allow_null:
            self.add_rule(Validator.NOT_NULL)
        if min_value is not None:
            # Fixed: the assert message claimed "integer" for a float check.
            assert isinstance(min_value, float), \
                """min_value must be float"""
            self.add_rule(Validator.MIN_VALUE, min_value)
        if max_value is not None:
            # Fixed: previously registered Validator.MAX_LEN (copy-paste from
            # CharField), a length rule, instead of the value bound.
            # NOTE(review): assumes Validator defines MAX_VALUE to mirror the
            # MIN_VALUE rule used above — confirm against validator.py.
            assert isinstance(max_value, float), \
                """max_value must be float"""
            self.add_rule(Validator.MAX_VALUE, max_value)
        if choices is not None:
            assert isinstance(choices, (list, tuple)), \
                """choices must be tuple or list"""
            for choice in choices:
                # Fixed: type(choice).__name_ typo (AttributeError on failure)
                # and the "integer" wording in the message.
                assert isinstance(choice, float), \
                    """
                    choices must be list or tuple of float but get {data_type}
                    """.format(data_type=type(choice).__name__)
            self.add_rule(Validator.IN, choices)
class RegexField(CharField):
    """CharField that must additionally match *pattern* (a regex)."""
    def __init__(self, pattern, *args, **kwargs):
        super(RegexField, self).__init__(*args, **kwargs)
        self.add_rule(Validator.REGEX, pattern)
class DateField(Field):
    """Date field validated against a strftime-style format.

    When *convert_to_date* is True the validator replaces the raw string
    with a parsed date object.
    """

    def __init__(self, format=None, convert_to_date=False, *args, **kwargs):
        super(DateField, self).__init__(*args, **kwargs)
        # Fall back to the ISO date format when none is supplied.
        self._format = format or "%Y-%m-%d"
        self.add_rule(Validator.DATE, {"format": self._format, "convert_to_date": convert_to_date})
class DateTimeField(Field):
    """Datetime field validated against a strftime-style format.

    When *convert_to_datetime* is True the validator replaces the raw
    string with a parsed datetime object.
    """

    def __init__(self, format=None, convert_to_datetime=False, *args, **kwargs):
        super(DateTimeField, self).__init__(*args, **kwargs)
        # Fall back to the ISO datetime format when none is supplied.
        self._format = format or "%Y-%m-%dT%H:%M:%S"
        self.add_rule(Validator.DATETIME, {"format": self._format, "convert_to_datetime": convert_to_datetime})
class BooleanField(Field):
    """Boolean field; registers only the boolean-type rule."""
    def __init__(self, *args, **kwargs):
        super(BooleanField, self).__init__(*args, **kwargs)
        self.add_rule(Validator.BOOLEAN)
Release History
===============
dev
---
- \[Short description of non-trivial change.\]
2.31.0 (2023-05-22)
-------------------
**Security**
- Versions of Requests between v2.3.0 and v2.30.0 are vulnerable to potential
forwarding of `Proxy-Authorization` headers to destination servers when
following HTTPS redirects.
When proxies are defined with user info (https://user:pass@proxy:8080), Requests
will construct a `Proxy-Authorization` header that is attached to the request to
authenticate with the proxy.
In cases where Requests receives a redirect response, it previously reattached
the `Proxy-Authorization` header incorrectly, resulting in the value being
sent through the tunneled connection to the destination server. Users who rely on
defining their proxy credentials in the URL are *strongly* encouraged to upgrade
to Requests 2.31.0+ to prevent unintentional leakage and rotate their proxy
credentials once the change has been fully deployed.
Users who do not use a proxy or do not supply their proxy credentials through
the user information portion of their proxy URL are not subject to this
vulnerability.
Full details can be read in our [Github Security Advisory](https://github.com/psf/requests/security/advisories/GHSA-j8r2-6x86-q33q)
and [CVE-2023-32681](https://nvd.nist.gov/vuln/detail/CVE-2023-32681).
2.30.0 (2023-05-03)
-------------------
**Dependencies**
- ⚠️ Added support for urllib3 2.0. ⚠️
This may contain minor breaking changes so we advise careful testing and
reviewing https://urllib3.readthedocs.io/en/latest/v2-migration-guide.html
prior to upgrading.
Users who wish to stay on urllib3 1.x can pin to `urllib3<2`.
2.29.0 (2023-04-26)
-------------------
**Improvements**
- Requests now defers chunked requests to the urllib3 implementation to improve
standardization. (#6226)
- Requests relaxes header component requirements to support bytes/str subclasses. (#6356)
2.28.2 (2023-01-12)
-------------------
**Dependencies**
- Requests now supports charset\_normalizer 3.x. (#6261)
**Bugfixes**
- Updated MissingSchema exception to suggest https scheme rather than http. (#6188)
2.28.1 (2022-06-29)
-------------------
**Improvements**
- Speed optimization in `iter_content` with transition to `yield from`. (#6170)
**Dependencies**
- Added support for chardet 5.0.0 (#6179)
- Added support for charset-normalizer 2.1.0 (#6169)
2.28.0 (2022-06-09)
-------------------
**Deprecations**
- ⚠️ Requests has officially dropped support for Python 2.7. ⚠️ (#6091)
- Requests has officially dropped support for Python 3.6 (including pypy3.6). (#6091)
**Improvements**
- Wrap JSON parsing issues in Request's JSONDecodeError for payloads without
an encoding to make `json()` API consistent. (#6097)
- Parse header components consistently, raising an InvalidHeader error in
all invalid cases. (#6154)
- Added provisional 3.11 support with current beta build. (#6155)
- Requests got a makeover and we decided to paint it black. (#6095)
**Bugfixes**
- Fixed bug where setting `CURL_CA_BUNDLE` to an empty string would disable
cert verification. All Requests 2.x versions before 2.28.0 are affected. (#6074)
- Fixed urllib3 exception leak, wrapping `urllib3.exceptions.SSLError` with
`requests.exceptions.SSLError` for `content` and `iter_content`. (#6057)
- Fixed issue where invalid Windows registry entries caused proxy resolution
to raise an exception rather than ignoring the entry. (#6149)
- Fixed issue where entire payload could be included in the error message for
JSONDecodeError. (#6036)
2.27.1 (2022-01-05)
-------------------
**Bugfixes**
- Fixed parsing issue that resulted in the `auth` component being
dropped from proxy URLs. (#6028)
2.27.0 (2022-01-03)
-------------------
**Improvements**
- Officially added support for Python 3.10. (#5928)
- Added a `requests.exceptions.JSONDecodeError` to unify JSON exceptions between
Python 2 and 3. This gets raised in the `response.json()` method, and is
backwards compatible as it inherits from previously thrown exceptions.
Can be caught from `requests.exceptions.RequestException` as well. (#5856)
- Improved error text for misnamed `InvalidSchema` and `MissingSchema`
exceptions. This is a temporary fix until exceptions can be renamed
(Schema->Scheme). (#6017)
- Improved proxy parsing for proxy URLs missing a scheme. This will address
recent changes to `urlparse` in Python 3.9+. (#5917)
**Bugfixes**
- Fixed defect in `extract_zipped_paths` which could result in an infinite loop
for some paths. (#5851)
- Fixed handling for `AttributeError` when calculating length of files obtained
by `Tarfile.extractfile()`. (#5239)
- Fixed urllib3 exception leak, wrapping `urllib3.exceptions.InvalidHeader` with
`requests.exceptions.InvalidHeader`. (#5914)
- Fixed bug where two Host headers were sent for chunked requests. (#5391)
- Fixed regression in Requests 2.26.0 where `Proxy-Authorization` was
incorrectly stripped from all requests sent with `Session.send`. (#5924)
- Fixed performance regression in 2.26.0 for hosts with a large number of
proxies available in the environment. (#5924)
- Fixed idna exception leak, wrapping `UnicodeError` with
`requests.exceptions.InvalidURL` for URLs with a leading dot (.) in the
domain. (#5414)
**Deprecations**
- Requests support for Python 2.7 and 3.6 will be ending in 2022. While we
don't have exact dates, Requests 2.27.x is likely to be the last release
series providing support.
2.26.0 (2021-07-13)
-------------------
**Improvements**
- Requests now supports Brotli compression, if either the `brotli` or
`brotlicffi` package is installed. (#5783)
- `Session.send` now correctly resolves proxy configurations from both
the Session and Request. Behavior now matches `Session.request`. (#5681)
**Bugfixes**
- Fixed a race condition in zip extraction when using Requests in parallel
from zip archive. (#5707)
**Dependencies**
- Instead of `chardet`, use the MIT-licensed `charset_normalizer` for Python3
to remove license ambiguity for projects bundling requests. If `chardet`
is already installed on your machine it will be used instead of `charset_normalizer`
to keep backwards compatibility. (#5797)
You can also install `chardet` while installing requests by
specifying `[use_chardet_on_py3]` extra as follows:
```shell
pip install "requests[use_chardet_on_py3]"
```
Python2 still depends upon the `chardet` module.
- Requests now supports `idna` 3.x on Python 3. `idna` 2.x will continue to
be used on Python 2 installations. (#5711)
**Deprecations**
- The `requests[security]` extra has been converted to a no-op install.
PyOpenSSL is no longer the recommended secure option for Requests. (#5867)
- Requests has officially dropped support for Python 3.5. (#5867)
2.25.1 (2020-12-16)
-------------------
**Bugfixes**
- Requests now treats `application/json` as `utf8` by default. Resolving
inconsistencies between `r.text` and `r.json` output. (#5673)
**Dependencies**
- Requests now supports chardet v4.x.
2.25.0 (2020-11-11)
-------------------
**Improvements**
- Added support for NETRC environment variable. (#5643)
**Dependencies**
- Requests now supports urllib3 v1.26.
**Deprecations**
- Requests v2.25.x will be the last release series with support for Python 3.5.
- The `requests[security]` extra is officially deprecated and will be removed
in Requests v2.26.0.
2.24.0 (2020-06-17)
-------------------
**Improvements**
- pyOpenSSL TLS implementation is now only used if Python
either doesn't have an `ssl` module or doesn't support
SNI. Previously pyOpenSSL was unconditionally used if available.
This applies even if pyOpenSSL is installed via the
`requests[security]` extra (#5443)
- Redirect resolution should now only occur when
`allow_redirects` is True. (#5492)
- No longer perform unnecessary Content-Length calculation for
requests that won't use it. (#5496)
2.23.0 (2020-02-19)
-------------------
**Improvements**
- Remove defunct reference to `prefetch` in Session `__attrs__` (#5110)
**Bugfixes**
- Requests no longer outputs password in basic auth usage warning. (#5099)
**Dependencies**
- Pinning for `chardet` and `idna` now uses major version instead of minor.
This hopefully reduces the need for releases every time a dependency is updated.
2.22.0 (2019-05-15)
-------------------
**Dependencies**
- Requests now supports urllib3 v1.25.2.
(note: 1.25.0 and 1.25.1 are incompatible)
**Deprecations**
- Requests has officially dropped support for Python 3.4.
2.21.0 (2018-12-10)
-------------------
**Dependencies**
- Requests now supports idna v2.8.
2.20.1 (2018-11-08)
-------------------
**Bugfixes**
- Fixed bug with unintended Authorization header stripping for
redirects using default ports (http/80, https/443).
2.20.0 (2018-10-18)
-------------------
**Bugfixes**
- Content-Type header parsing is now case-insensitive (e.g.
charset=utf8 v Charset=utf8).
- Fixed exception leak where certain redirect urls would raise
uncaught urllib3 exceptions.
- Requests removes Authorization header from requests redirected
from https to http on the same hostname. (CVE-2018-18074)
- `should_bypass_proxies` now handles URIs without hostnames (e.g.
files).
**Dependencies**
- Requests now supports urllib3 v1.24.
**Deprecations**
- Requests has officially dropped support for Python 2.6.
2.19.1 (2018-06-14)
-------------------
**Bugfixes**
- Fixed issue where status\_codes.py's `init` function failed trying
to append to a `__doc__` value of `None`.
2.19.0 (2018-06-12)
-------------------
**Improvements**
- Warn user about possible slowdown when using cryptography version
< 1.3.4
- Check for invalid host in proxy URL, before forwarding request to
adapter.
- Fragments are now properly maintained across redirects. (RFC7231
7.1.2)
- Removed use of cgi module to expedite library load time.
- Added support for SHA-256 and SHA-512 digest auth algorithms.
- Minor performance improvement to `Request.content`.
- Migrate to using collections.abc for 3.7 compatibility.
**Bugfixes**
- Parsing empty `Link` headers with `parse_header_links()` no longer
return one bogus entry.
- Fixed issue where loading the default certificate bundle from a zip
archive would raise an `IOError`.
- Fixed issue with unexpected `ImportError` on windows system which do
not support `winreg` module.
- DNS resolution in proxy bypass no longer includes the username and
password in the request. This also fixes the issue of DNS queries
failing on macOS.
- Properly normalize adapter prefixes for url comparison.
- Passing `None` as a file pointer to the `files` param no longer
raises an exception.
- Calling `copy` on a `RequestsCookieJar` will now preserve the cookie
policy correctly.
**Dependencies**
- We now support idna v2.7.
- We now support urllib3 v1.23.
2.18.4 (2017-08-15)
-------------------
**Improvements**
- Error messages for invalid headers now include the header name for
easier debugging
**Dependencies**
- We now support idna v2.6.
2.18.3 (2017-08-02)
-------------------
**Improvements**
- Running `$ python -m requests.help` now includes the installed
version of idna.
**Bugfixes**
- Fixed issue where Requests would raise `ConnectionError` instead of
`SSLError` when encountering SSL problems when using urllib3 v1.22.
2.18.2 (2017-07-25)
-------------------
**Bugfixes**
- `requests.help` no longer fails on Python 2.6 due to the absence of
`ssl.OPENSSL_VERSION_NUMBER`.
**Dependencies**
- We now support urllib3 v1.22.
2.18.1 (2017-06-14)
-------------------
**Bugfixes**
- Fix an error in the packaging whereby the `*.whl` contained
incorrect data that regressed the fix in v2.17.3.
2.18.0 (2017-06-14)
-------------------
**Improvements**
- `Response` is now a context manager, so can be used directly in a
`with` statement without first having to be wrapped by
`contextlib.closing()`.
**Bugfixes**
- Resolve installation failure if multiprocessing is not available
- Resolve tests crash if multiprocessing is not able to determine the
number of CPU cores
- Resolve error swallowing in utils set\_environ generator
2.17.3 (2017-05-29)
-------------------
**Improvements**
- Improved `packages` namespace identity support, for monkeypatching
libraries.
2.17.2 (2017-05-29)
-------------------
**Improvements**
- Improved `packages` namespace identity support, for monkeypatching
libraries.
2.17.1 (2017-05-29)
-------------------
**Improvements**
- Improved `packages` namespace identity support, for monkeypatching
libraries.
2.17.0 (2017-05-29)
-------------------
**Improvements**
- Removal of the 301 redirect cache. This improves thread-safety.
2.16.5 (2017-05-28)
-------------------
- Improvements to `$ python -m requests.help`.
2.16.4 (2017-05-27)
-------------------
- Introduction of the `$ python -m requests.help` command, for
debugging with maintainers!
2.16.3 (2017-05-27)
-------------------
- Further restored the `requests.packages` namespace for compatibility
reasons.
2.16.2 (2017-05-27)
-------------------
- Further restored the `requests.packages` namespace for compatibility
reasons.
No code modification (noted below) should be necessary any longer.
2.16.1 (2017-05-27)
-------------------
- Restored the `requests.packages` namespace for compatibility
reasons.
- Bugfix for `urllib3` version parsing.
**Note**: code that was written to import against the
`requests.packages` namespace previously will have to import code that
rests at this module-level now.
For example:
from requests.packages.urllib3.poolmanager import PoolManager
Will need to be re-written to be:
from requests.packages import urllib3
urllib3.poolmanager.PoolManager
Or, even better:
from urllib3.poolmanager import PoolManager
2.16.0 (2017-05-26)
-------------------
- Unvendor ALL the things!
2.15.1 (2017-05-26)
-------------------
- Everyone makes mistakes.
2.15.0 (2017-05-26)
-------------------
**Improvements**
- Introduction of the `Response.next` property, for getting the next
`PreparedRequest` from a redirect chain (when
`allow_redirects=False`).
- Internal refactoring of `__version__` module.
**Bugfixes**
- Restored once-optional parameter for
`requests.utils.get_environ_proxies()`.
2.14.2 (2017-05-10)
-------------------
**Bugfixes**
- Changed a less-than to an equal-to and an or in the dependency
markers to widen compatibility with older setuptools releases.
2.14.1 (2017-05-09)
-------------------
**Bugfixes**
- Changed the dependency markers to widen compatibility with older pip
releases.
2.14.0 (2017-05-09)
-------------------
**Improvements**
- It is now possible to pass `no_proxy` as a key to the `proxies`
dictionary to provide handling similar to the `NO_PROXY` environment
variable.
- When users provide invalid paths to certificate bundle files or
directories Requests now raises `IOError`, rather than failing at
the time of the HTTPS request with a fairly inscrutable certificate
validation error.
- The behavior of `SessionRedirectMixin` was slightly altered.
`resolve_redirects` will now detect a redirect by calling
`get_redirect_target(response)` instead of directly querying
`Response.is_redirect` and `Response.headers['location']`. Advanced
users will be able to process malformed redirects more easily.
- Changed the internal calculation of elapsed request time to have
higher resolution on Windows.
- Added `win_inet_pton` as conditional dependency for the `[socks]`
extra on Windows with Python 2.7.
- Changed the proxy bypass implementation on Windows: the proxy bypass
check doesn't use forward and reverse DNS requests anymore
- URLs with schemes that begin with `http` but are not `http` or
`https` no longer have their host parts forced to lowercase.
**Bugfixes**
- Much improved handling of non-ASCII `Location` header values in
redirects. Fewer `UnicodeDecodeErrors` are encountered on Python 2,
and Python 3 now correctly understands that Latin-1 is unlikely to
be the correct encoding.
- If an attempt to `seek` file to find out its length fails, we now
appropriately handle that by aborting our content-length
calculations.
- Restricted `HTTPDigestAuth` to only respond to auth challenges made
on 4XX responses, rather than to all auth challenges.
- Fixed some code that was firing `DeprecationWarning` on Python 3.6.
- The dismayed person emoticon (`/o\\`) no longer has a big head. I'm
sure this is what you were all worrying about most.
**Miscellaneous**
- Updated bundled urllib3 to v1.21.1.
- Updated bundled chardet to v3.0.2.
- Updated bundled idna to v2.5.
- Updated bundled certifi to 2017.4.17.
2.13.0 (2017-01-24)
-------------------
**Features**
- Only load the `idna` library when we've determined we need it. This
will save some memory for users.
**Miscellaneous**
- Updated bundled urllib3 to 1.20.
- Updated bundled idna to 2.2.
2.12.5 (2017-01-18)
-------------------
**Bugfixes**
- Fixed an issue with JSON encoding detection, specifically detecting
big-endian UTF-32 with BOM.
2.12.4 (2016-12-14)
-------------------
**Bugfixes**
- Fixed regression from 2.12.2 where non-string types were rejected in
the basic auth parameters. While support for this behaviour has been
re-added, the behaviour is deprecated and will be removed in the
future.
2.12.3 (2016-12-01)
-------------------
**Bugfixes**
- Fixed regression from v2.12.1 for URLs with schemes that begin with
"http". These URLs have historically been processed as though they
were HTTP-schemed URLs, and so have had parameters added. This was
removed in v2.12.2 in an overzealous attempt to resolve problems
with IDNA-encoding those URLs. This change was reverted: the other
fixes for IDNA-encoding have been judged to be sufficient to return
to the behaviour Requests had before v2.12.0.
2.12.2 (2016-11-30)
-------------------
**Bugfixes**
- Fixed several issues with IDNA-encoding URLs that are technically
invalid but which are widely accepted. Requests will now attempt to
IDNA-encode a URL if it can but, if it fails, and the host contains
only ASCII characters, it will be passed through optimistically.
This will allow users to opt-in to using IDNA2003 themselves if they
want to, and will also allow technically invalid but still common
hostnames.
- Fixed an issue where URLs with leading whitespace would raise
`InvalidSchema` errors.
- Fixed an issue where some URLs without the HTTP or HTTPS schemes
would still have HTTP URL preparation applied to them.
- Fixed an issue where Unicode strings could not be used in basic
auth.
- Fixed an issue encountered by some Requests plugins where
constructing a Response object would cause `Response.content` to
raise an `AttributeError`.
2.12.1 (2016-11-16)
-------------------
**Bugfixes**
- Updated setuptools 'security' extra for the new PyOpenSSL backend in
urllib3.
**Miscellaneous**
- Updated bundled urllib3 to 1.19.1.
2.12.0 (2016-11-15)
-------------------
**Improvements**
- Updated support for internationalized domain names from IDNA2003 to
IDNA2008. This updated support is required for several forms of IDNs
and is mandatory for .de domains.
- Much improved heuristics for guessing content lengths: Requests will
no longer read an entire `StringIO` into memory.
- Much improved logic for recalculating `Content-Length` headers for
`PreparedRequest` objects.
- Improved tolerance for file-like objects that have no `tell` method
but do have a `seek` method.
- Anything that is a subclass of `Mapping` is now treated like a
dictionary by the `data=` keyword argument.
- Requests now tolerates empty passwords in proxy credentials, rather
than stripping the credentials.
- If a request is made with a file-like object as the body and that
request is redirected with a 307 or 308 status code, Requests will
now attempt to rewind the body object so it can be replayed.
**Bugfixes**
- When calling `response.close`, the call to `close` will be
propagated through to non-urllib3 backends.
- Fixed issue where the `ALL_PROXY` environment variable would be
preferred over scheme-specific variables like `HTTP_PROXY`.
- Fixed issue where non-UTF8 reason phrases got severely mangled by
falling back to decoding using ISO 8859-1 instead.
- Fixed a bug where Requests would not correctly correlate cookies set
when using custom Host headers if those Host headers did not use the
native string type for the platform.
**Miscellaneous**
- Updated bundled urllib3 to 1.19.
- Updated bundled certifi certs to 2016.09.26.
2.11.1 (2016-08-17)
-------------------
**Bugfixes**
- Fixed a bug when using `iter_content` with `decode_unicode=True` for
streamed bodies would raise `AttributeError`. This bug was
introduced in 2.11.
- Strip Content-Type and Transfer-Encoding headers from the header
block when following a redirect that transforms the verb from
POST/PUT to GET.
2.11.0 (2016-08-08)
-------------------
**Improvements**
- Added support for the `ALL_PROXY` environment variable.
- Reject header values that contain leading whitespace or newline
characters to reduce risk of header smuggling.
**Bugfixes**
- Fixed occasional `TypeError` when attempting to decode a JSON
response that occurred in an error case. Now correctly returns a
`ValueError`.
- Requests would incorrectly ignore a non-CIDR IP address in the
`NO_PROXY` environment variables: Requests now treats it as a
specific IP.
- Fixed a bug when sending JSON data that could cause us to encounter
obscure OpenSSL errors in certain network conditions (yes, really).
- Added type checks to ensure that `iter_content` only accepts
integers and `None` for chunk sizes.
- Fixed issue where responses whose body had not been fully consumed
would have the underlying connection closed but not returned to the
connection pool, which could cause Requests to hang in situations
where the `HTTPAdapter` had been configured to use a blocking
connection pool.
**Miscellaneous**
- Updated bundled urllib3 to 1.16.
- Some previous releases accidentally accepted non-strings as
acceptable header values. This release does not.
2.10.0 (2016-04-29)
-------------------
**New Features**
- SOCKS Proxy Support! (requires PySocks;
`$ pip install requests[socks]`)
**Miscellaneous**
- Updated bundled urllib3 to 1.15.1.
2.9.2 (2016-04-29)
------------------
**Improvements**
- Change built-in CaseInsensitiveDict (used for headers) to use
OrderedDict as its underlying datastore.
**Bugfixes**
- Don't use redirect\_cache if allow\_redirects=False
- When passed objects that throw exceptions from `tell()`, send them
via chunked transfer encoding instead of failing.
- Raise a ProxyError for proxy related connection issues.
2.9.1 (2015-12-21)
------------------
**Bugfixes**
- Resolve regression introduced in 2.9.0 that made it impossible to
send binary strings as bodies in Python 3.
- Fixed errors when calculating cookie expiration dates in certain
locales.
**Miscellaneous**
- Updated bundled urllib3 to 1.13.1.
2.9.0 (2015-12-15)
------------------
**Minor Improvements** (Backwards compatible)
- The `verify` keyword argument now supports being passed a path to a
directory of CA certificates, not just a single-file bundle.
- Warnings are now emitted when sending files opened in text mode.
- Added the 511 Network Authentication Required status code to the
status code registry.
**Bugfixes**
- For file-like objects that are not sought to the very beginning, we
now send the content length for the number of bytes we will actually
read, rather than the total size of the file, allowing partial file
uploads.
- When uploading file-like objects, if they are empty or have no
obvious content length we set `Transfer-Encoding: chunked` rather
than `Content-Length: 0`.
- We correctly receive the response in buffered mode when uploading
chunked bodies.
- We now handle being passed a query string as a bytestring on Python
3, by decoding it as UTF-8.
- Sessions are now closed in all cases (exceptional and not) when
using the functional API rather than leaking and waiting for the
garbage collector to clean them up.
- Correctly handle digest auth headers with a malformed `qop`
directive that contains no token, by treating it the same as if no
`qop` directive was provided at all.
- Minor performance improvements when removing specific cookies by
name.
**Miscellaneous**
- Updated urllib3 to 1.13.
2.8.1 (2015-10-13)
------------------
**Bugfixes**
- Update certificate bundle to match `certifi` 2015.9.6.2's weak
certificate bundle.
- Fix a bug in 2.8.0 where requests would raise `ConnectTimeout`
instead of `ConnectionError`
- When using the PreparedRequest flow, requests will now correctly
respect the `json` parameter. Broken in 2.8.0.
- When using the PreparedRequest flow, requests will now correctly
handle a Unicode-string method name on Python 2. Broken in 2.8.0.
2.8.0 (2015-10-05)
------------------
**Minor Improvements** (Backwards Compatible)
- Requests now supports per-host proxies. This allows the `proxies`
dictionary to have entries of the form
`{'<scheme>://<hostname>': '<proxy>'}`. Host-specific proxies will
be used in preference to the previously-supported scheme-specific
ones, but the previous syntax will continue to work.
- `Response.raise_for_status` now prints the URL that failed as part
of the exception message.
- `requests.utils.get_netrc_auth` now takes a `raise_errors` kwarg,
defaulting to `False`. When `True`, errors parsing `.netrc` files
cause exceptions to be thrown.
- Change to bundled projects import logic to make it easier to
unbundle requests downstream.
- Changed the default User-Agent string to avoid leaking data on
Linux: now contains only the requests version.
**Bugfixes**
- The `json` parameter to `post()` and friends will now only be used
if neither `data` nor `files` are present, consistent with the
documentation.
- We now ignore empty fields in the `NO_PROXY` environment variable.
- Fixed problem where `httplib.BadStatusLine` would get raised if
combining `stream=True` with `contextlib.closing`.
- Prevented bugs where we would attempt to return the same connection
back to the connection pool twice when sending a Chunked body.
- Miscellaneous minor internal changes.
- Digest Auth support is now thread safe.
**Updates**
- Updated urllib3 to 1.12.
2.7.0 (2015-05-03)
------------------
This is the first release that follows our new release process. For
more, see [our
documentation](https://requests.readthedocs.io/en/latest/community/release-process/).
**Bugfixes**
- Updated urllib3 to 1.10.4, resolving several bugs involving chunked
transfer encoding and response framing.
2.6.2 (2015-04-23)
------------------
**Bugfixes**
- Fix regression where compressed data that was sent as chunked data
was not properly decompressed. (\#2561)
2.6.1 (2015-04-22)
------------------
**Bugfixes**
- Remove VendorAlias import machinery introduced in v2.5.2.
- Simplify the PreparedRequest.prepare API: We no longer require the
user to pass an empty list to the hooks keyword argument. (c.f.
\#2552)
- Resolve redirects now receives and forwards all of the original
arguments to the adapter. (\#2503)
- Handle UnicodeDecodeErrors when trying to deal with a unicode URL
that cannot be encoded in ASCII. (\#2540)
- Populate the parsed path of the URI field when performing Digest
Authentication. (\#2426)
- Copy a PreparedRequest's CookieJar more reliably when it is not an
instance of RequestsCookieJar. (\#2527)
2.6.0 (2015-03-14)
------------------
**Bugfixes**
- CVE-2015-2296: Fix handling of cookies on redirect. Previously a
cookie without a host value set would use the hostname for the
redirected URL exposing requests users to session fixation attacks
and potentially cookie stealing. This was disclosed privately by
Matthew Daley of [BugFuzz](https://bugfuzz.com). This affects all
versions of requests from v2.1.0 to v2.5.3 (inclusive on both ends).
- Fix error when requests is an `install_requires` dependency and
`python setup.py test` is run. (\#2462)
- Fix error when urllib3 is unbundled and requests continues to use
the vendored import location.
- Include fixes to `urllib3`'s header handling.
- Requests' handling of unvendored dependencies is now more
restrictive.
**Features and Improvements**
- Support bytearrays when passed as parameters in the `files`
argument. (\#2468)
- Avoid data duplication when creating a request with `str`, `bytes`,
or `bytearray` input to the `files` argument.
2.5.3 (2015-02-24)
------------------
**Bugfixes**
- Revert changes to our vendored certificate bundle. For more context
see (\#2455, \#2456, and <https://bugs.python.org/issue23476>)
2.5.2 (2015-02-23)
------------------
**Features and Improvements**
- Add sha256 fingerprint support.
([shazow/urllib3\#540](https://github.com/shazow/urllib3/pull/540))
- Improve the performance of headers.
([shazow/urllib3\#544](https://github.com/shazow/urllib3/pull/544))
**Bugfixes**
- Copy pip's import machinery. When downstream redistributors remove
requests.packages.urllib3 the import machinery will continue to let
those same symbols work. Example usage in requests' documentation
and 3rd-party libraries relying on the vendored copies of urllib3
will work without having to fallback to the system urllib3.
- Attempt to quote parts of the URL on redirect if unquoting and then
quoting fails. (\#2356)
- Fix filename type check for multipart form-data uploads. (\#2411)
- Properly handle the case where a server issuing digest
authentication challenges provides both auth and auth-int
qop-values. (\#2408)
- Fix a socket leak.
([shazow/urllib3\#549](https://github.com/shazow/urllib3/pull/549))
- Fix multiple `Set-Cookie` headers properly.
([shazow/urllib3\#534](https://github.com/shazow/urllib3/pull/534))
- Disable the built-in hostname verification.
([shazow/urllib3\#526](https://github.com/shazow/urllib3/pull/526))
- Fix the behaviour of decoding an exhausted stream.
([shazow/urllib3\#535](https://github.com/shazow/urllib3/pull/535))
**Security**
- Pulled in an updated `cacert.pem`.
- Drop RC4 from the default cipher list.
([shazow/urllib3\#551](https://github.com/shazow/urllib3/pull/551))
2.5.1 (2014-12-23)
------------------
**Behavioural Changes**
- Only catch HTTPErrors in raise\_for\_status (\#2382)
**Bugfixes**
- Handle LocationParseError from urllib3 (\#2344)
- Handle file-like object filenames that are not strings (\#2379)
- Unbreak HTTPDigestAuth handler. Allow new nonces to be negotiated
(\#2389)
2.5.0 (2014-12-01)
------------------
**Improvements**
- Allow usage of urllib3's Retry object with HTTPAdapters (\#2216)
- The `iter_lines` method on a response now accepts a delimiter with
which to split the content (\#2295)
**Behavioural Changes**
- Add deprecation warnings to functions in requests.utils that will be
removed in 3.0 (\#2309)
- Sessions used by the functional API are always closed (\#2326)
- Restrict requests to HTTP/1.1 and HTTP/1.0 (stop accepting HTTP/0.9)
(\#2323)
**Bugfixes**
- Only parse the URL once (\#2353)
- Allow Content-Length header to always be overridden (\#2332)
- Properly handle files in HTTPDigestAuth (\#2333)
- Cap redirect\_cache size to prevent memory abuse (\#2299)
- Fix HTTPDigestAuth handling of redirects after authenticating
successfully (\#2253)
- Fix crash with custom method parameter to Session.request (\#2317)
- Fix how Link headers are parsed using the regular expression library
(\#2271)
**Documentation**
- Add more references for interlinking (\#2348)
- Update CSS for theme (\#2290)
- Update width of buttons and sidebar (\#2289)
- Replace references of Gittip with Gratipay (\#2282)
- Add link to changelog in sidebar (\#2273)
2.4.3 (2014-10-06)
------------------
**Bugfixes**
- Unicode URL improvements for Python 2.
- Re-order JSON param for backwards compat.
- Automatically defrag authentication schemes from host/pass URIs.
([\#2249](https://github.com/psf/requests/issues/2249))
2.4.2 (2014-10-05)
------------------
**Improvements**
- FINALLY! Add json parameter for uploads!
([\#2258](https://github.com/psf/requests/pull/2258))
- Support for bytestring URLs on Python 3.x
([\#2238](https://github.com/psf/requests/pull/2238))
**Bugfixes**
- Avoid getting stuck in a loop
([\#2244](https://github.com/psf/requests/pull/2244))
- Multiple calls to iter\* fail with unhelpful error.
([\#2240](https://github.com/psf/requests/issues/2240),
[\#2241](https://github.com/psf/requests/issues/2241))
**Documentation**
- Correct redirection introduction
([\#2245](https://github.com/psf/requests/pull/2245/))
- Added example of how to send multiple files in one request.
([\#2227](https://github.com/psf/requests/pull/2227/))
- Clarify how to pass a custom set of CAs
([\#2248](https://github.com/psf/requests/pull/2248/))
2.4.1 (2014-09-09)
------------------
- Now has a "security" package extras set,
`$ pip install requests[security]`
- Requests will now use Certifi if it is available.
- Capture and re-raise urllib3 ProtocolError
- Bugfix for responses that attempt to redirect to themselves forever
(wtf?).
2.4.0 (2014-08-29)
------------------
**Behavioral Changes**
- `Connection: keep-alive` header is now sent automatically.
**Improvements**
- Support for connect timeouts! Timeout now accepts a tuple (connect,
read) which is used to set individual connect and read timeouts.
- Allow copying of PreparedRequests without headers/cookies.
- Updated bundled urllib3 version.
- Refactored settings loading from environment -- new
Session.merge\_environment\_settings.
- Handle socket errors in iter\_content.
2.3.0 (2014-05-16)
------------------
**API Changes**
- New `Response` property `is_redirect`, which is true when the
library could have processed this response as a redirection (whether
or not it actually did).
- The `timeout` parameter now affects requests with both `stream=True`
and `stream=False` equally.
- The change in v2.0.0 to mandate explicit proxy schemes has been
reverted. Proxy schemes now default to `http://`.
- The `CaseInsensitiveDict` used for HTTP headers now behaves like a
normal dictionary when referenced as a string or viewed in the
interpreter.
**Bugfixes**
- No longer expose Authorization or Proxy-Authorization headers on
redirect. Fix CVE-2014-1829 and CVE-2014-1830 respectively.
- Authorization is re-evaluated each redirect.
- On redirect, pass url as native strings.
- Fall-back to autodetected encoding for JSON when Unicode detection
fails.
- Headers set to `None` on the `Session` are now correctly not sent.
- Correctly honor `decode_unicode` even if it wasn't used earlier in
the same response.
- Stop advertising `compress` as a supported Content-Encoding.
- The `Response.history` parameter is now always a list.
- Many, many `urllib3` bugfixes.
2.2.1 (2014-01-23)
------------------
**Bugfixes**
- Fixes incorrect parsing of proxy credentials that contain a literal
or encoded '\#' character.
- Assorted urllib3 fixes.
2.2.0 (2014-01-09)
------------------
**API Changes**
- New exception: `ContentDecodingError`. Raised instead of `urllib3`
`DecodeError` exceptions.
**Bugfixes**
- Avoid many many exceptions from the buggy implementation of
`proxy_bypass` on OS X in Python 2.6.
- Avoid crashing when attempting to get authentication credentials
from \~/.netrc when running as a user without a home directory.
- Use the correct pool size for pools of connections to proxies.
- Fix iteration of `CookieJar` objects.
- Ensure that cookies are persisted over redirect.
- Switch back to using chardet, since it has merged with charade.
2.1.0 (2013-12-05)
------------------
- Updated CA Bundle, of course.
- Cookies set on individual Requests through a `Session` (e.g. via
`Session.get()`) are no longer persisted to the `Session`.
- Clean up connections when we hit problems during chunked upload,
rather than leaking them.
- Return connections to the pool when a chunked upload is successful,
rather than leaking it.
- Match the HTTPbis recommendation for HTTP 301 redirects.
- Prevent hanging when using streaming uploads and Digest Auth when a
401 is received.
- Values of headers set by Requests are now always the native string
type.
- Fix previously broken SNI support.
- Fix accessing HTTP proxies using proxy authentication.
- Unencode HTTP Basic usernames and passwords extracted from URLs.
- Support for IP address ranges for no\_proxy environment variable
- Parse headers correctly when users override the default `Host:`
header.
- Avoid munging the URL in case of case-sensitive servers.
- Looser URL handling for non-HTTP/HTTPS urls.
- Accept unicode methods in Python 2.6 and 2.7.
- More resilient cookie handling.
- Make `Response` objects pickleable.
- Actually added MD5-sess to Digest Auth instead of pretending to like
last time.
- Updated internal urllib3.
- Fixed @Lukasa's lack of taste.
2.0.1 (2013-10-24)
------------------
- Updated included CA Bundle with new mistrusts and automated process
for the future
- Added MD5-sess to Digest Auth
- Accept per-file headers in multipart file POST messages.
- Fixed: Don't send the full URL on CONNECT messages.
- Fixed: Correctly lowercase a redirect scheme.
- Fixed: Cookies not persisted when set via functional API.
- Fixed: Translate urllib3 ProxyError into a requests ProxyError
derived from ConnectionError.
- Updated internal urllib3 and chardet.
2.0.0 (2013-09-24)
------------------
**API Changes:**
- Keys in the Headers dictionary are now native strings on all Python
versions, i.e. bytestrings on Python 2, unicode on Python 3.
- Proxy URLs now *must* have an explicit scheme. A `MissingSchema`
exception will be raised if they don't.
- Timeouts now apply to read time if `stream=False`.
- `RequestException` is now a subclass of `IOError`, not
`RuntimeError`.
- Added new method to `PreparedRequest` objects:
`PreparedRequest.copy()`.
- Added new method to `Session` objects: `Session.update_request()`.
This method updates a `Request` object with the data (e.g. cookies)
stored on the `Session`.
- Added new method to `Session` objects: `Session.prepare_request()`.
This method updates and prepares a `Request` object, and returns the
corresponding `PreparedRequest` object.
- Added new method to `HTTPAdapter` objects:
`HTTPAdapter.proxy_headers()`. This should not be called directly,
but improves the subclass interface.
- `httplib.IncompleteRead` exceptions caused by incorrect chunked
encoding will now raise a Requests `ChunkedEncodingError` instead.
- Invalid percent-escape sequences now cause a Requests `InvalidURL`
exception to be raised.
- HTTP 208 no longer uses reason phrase `"im_used"`. Correctly uses
`"already_reported"`.
- HTTP 226 reason added (`"im_used"`).
**Bugfixes:**
- Vastly improved proxy support, including the CONNECT verb. Special
thanks to the many contributors who worked towards this improvement.
- Cookies are now properly managed when 401 authentication responses
are received.
- Chunked encoding fixes.
- Support for mixed case schemes.
- Better handling of streaming downloads.
- Retrieve environment proxies from more locations.
- Minor cookies fixes.
- Improved redirect behaviour.
- Improved streaming behaviour, particularly for compressed data.
- Miscellaneous small Python 3 text encoding bugs.
- `.netrc` no longer overrides explicit auth.
- Cookies set by hooks are now correctly persisted on Sessions.
- Fix problem with cookies that specify port numbers in their host
field.
- `BytesIO` can be used to perform streaming uploads.
- More generous parsing of the `no_proxy` environment variable.
- Non-string objects can be passed in data values alongside files.
1.2.3 (2013-05-25)
------------------
- Simple packaging fix
1.2.2 (2013-05-23)
------------------
- Simple packaging fix
1.2.1 (2013-05-20)
------------------
- 301 and 302 redirects now change the verb to GET for all verbs, not
just POST, improving browser compatibility.
- Python 3.3.2 compatibility
- Always percent-encode location headers
- Fix connection adapter matching to be most-specific first
- new argument to the default connection adapter for passing a block
argument
- prevent a KeyError when there's no link headers
1.2.0 (2013-03-31)
------------------
- Fixed cookies on sessions and on requests
- Significantly change how hooks are dispatched - hooks now receive
all the arguments specified by the user when making a request so
hooks can make a secondary request with the same parameters. This is
especially necessary for authentication handler authors
- certifi support was removed
- Fixed bug where using OAuth 1 with body `signature_type` sent no
data
- Major proxy work thanks to @Lukasa including parsing of proxy
authentication from the proxy url
- Fix DigestAuth handling too many 401s
- Update vendored urllib3 to include SSL bug fixes
- Allow keyword arguments to be passed to `json.loads()` via the
`Response.json()` method
- Don't send `Content-Length` header by default on `GET` or `HEAD`
requests
- Add `elapsed` attribute to `Response` objects to time how long a
request took.
- Fix `RequestsCookieJar`
- Sessions and Adapters are now picklable, i.e., can be used with the
multiprocessing library
- Update charade to version 1.0.3
The change in how hooks are dispatched will likely cause a great deal of
issues.
1.1.0 (2013-01-10)
------------------
- CHUNKED REQUESTS
- Support for iterable response bodies
- Assume servers persist redirect params
- Allow explicit content types to be specified for file data
- Make merge\_kwargs case-insensitive when looking up keys
1.0.3 (2012-12-18)
------------------
- Fix file upload encoding bug
- Fix cookie behavior
1.0.2 (2012-12-17)
------------------
- Proxy fix for HTTPAdapter.
1.0.1 (2012-12-17)
------------------
- Cert verification exception bug.
- Proxy fix for HTTPAdapter.
1.0.0 (2012-12-17)
------------------
- Massive Refactor and Simplification
- Switch to Apache 2.0 license
- Swappable Connection Adapters
- Mountable Connection Adapters
- Mutable ProcessedRequest chain
- /s/prefetch/stream
- Removal of all configuration
- Standard library logging
- Make Response.json() callable, not property.
- Usage of new charade project, which provides python 2 and 3
simultaneous chardet.
- Removal of all hooks except 'response'
- Removal of all authentication helpers (OAuth, Kerberos)
This is not a backwards compatible change.
0.14.2 (2012-10-27)
-------------------
- Improved mime-compatible JSON handling
- Proxy fixes
- Path hack fixes
- Case-Insensitive Content-Encoding headers
- Support for CJK parameters in form posts
0.14.1 (2012-10-01)
-------------------
- Python 3.3 Compatibility
- Simplify default accept-encoding
- Bugfixes
0.14.0 (2012-09-02)
-------------------
- No more iter\_content errors if already downloaded.
0.13.9 (2012-08-25)
-------------------
- Fix for OAuth + POSTs
- Remove exception eating from dispatch\_hook
- General bugfixes
0.13.8 (2012-08-21)
-------------------
- Incredible Link header support :)
0.13.7 (2012-08-19)
-------------------
- Support for (key, value) lists everywhere.
- Digest Authentication improvements.
- Ensure proxy exclusions work properly.
- Clearer UnicodeError exceptions.
- Automatic casting of URLs to strings (fURL and such)
- Bugfixes.
0.13.6 (2012-08-06)
-------------------
- Long awaited fix for hanging connections!
0.13.5 (2012-07-27)
-------------------
- Packaging fix
0.13.4 (2012-07-27)
-------------------
- GSSAPI/Kerberos authentication!
- App Engine 2.7 Fixes!
- Fix leaking connections (from urllib3 update)
- OAuthlib path hack fix
- OAuthlib URL parameters fix.
0.13.3 (2012-07-12)
-------------------
- Use simplejson if available.
- Do not hide SSLErrors behind Timeouts.
- Fixed param handling with urls containing fragments.
- Significantly improved information in User Agent.
- client certificates are ignored when verify=False
0.13.2 (2012-06-28)
-------------------
- Zero dependencies (once again)!
- New: Response.reason
- Sign querystring parameters in OAuth 1.0
- Client certificates no longer ignored when verify=False
- Add openSUSE certificate support
0.13.1 (2012-06-07)
-------------------
- Allow passing a file or file-like object as data.
- Allow hooks to return responses that indicate errors.
- Fix Response.text and Response.json for body-less responses.
0.13.0 (2012-05-29)
-------------------
- Removal of Requests.async in favor of
[grequests](https://github.com/kennethreitz/grequests)
- Allow disabling of cookie persistence.
- New implementation of safe\_mode
- cookies.get now supports default argument
- Session cookies not saved when Session.request is called with
return\_response=False
- Env: no\_proxy support.
- RequestsCookieJar improvements.
- Various bug fixes.
0.12.1 (2012-05-08)
-------------------
- New `Response.json` property.
- Ability to add string file uploads.
- Fix out-of-range issue with iter\_lines.
- Fix iter\_content default size.
- Fix POST redirects containing files.
0.12.0 (2012-05-02)
-------------------
- EXPERIMENTAL OAUTH SUPPORT!
- Proper CookieJar-backed cookies interface with awesome dict-like
interface.
- Speed fix for non-iterated content chunks.
- Move `pre_request` to a more usable place.
- New `pre_send` hook.
- Lazily encode data, params, files.
- Load system Certificate Bundle if `certifi` isn't available.
- Cleanups, fixes.
0.11.2 (2012-04-22)
-------------------
- Attempt to use the OS's certificate bundle if `certifi` isn't
available.
- Infinite digest auth redirect fix.
- Multi-part file upload improvements.
- Fix decoding of invalid %encodings in URLs.
- If there is no content in a response don't throw an error the second
time that content is attempted to be read.
- Upload data on redirects.
0.11.1 (2012-03-30)
-------------------
- POST redirects now break RFC to do what browsers do: Follow up with
a GET.
- New `strict_mode` configuration to disable new redirect behavior.
0.11.0 (2012-03-14)
-------------------
- Private SSL Certificate support
- Remove select.poll from Gevent monkeypatching
- Remove redundant generator for chunked transfer encoding
- Fix: Response.ok raises Timeout Exception in safe\_mode
0.10.8 (2012-03-09)
-------------------
- Generate chunked ValueError fix
- Proxy configuration by environment variables
- Simplification of iter\_lines.
- New trust\_env configuration for disabling system/environment hints.
- Suppress cookie errors.
0.10.7 (2012-03-07)
-------------------
- encode\_uri = False
0.10.6 (2012-02-25)
-------------------
- Allow '=' in cookies.
0.10.5 (2012-02-25)
-------------------
- Response body with 0 content-length fix.
- New async.imap.
- Don't fail on netrc.
0.10.4 (2012-02-20)
-------------------
- Honor netrc.
0.10.3 (2012-02-20)
-------------------
- HEAD requests don't follow redirects anymore.
- raise\_for\_status() doesn't raise for 3xx anymore.
- Make Session objects picklable.
- ValueError for invalid schema URLs.
0.10.2 (2012-01-15)
-------------------
- Vastly improved URL quoting.
- Additional allowed cookie key values.
- Attempted fix for "Too many open files" Error
- Replace unicode errors on first pass, no need for second pass.
- Append '/' to bare-domain urls before query insertion.
- Exceptions now inherit from RuntimeError.
- Binary uploads + auth fix.
- Bugfixes.
0.10.1 (2012-01-23)
-------------------
- PYTHON 3 SUPPORT!
- Dropped 2.5 Support. (*Backwards Incompatible*)
0.10.0 (2012-01-21)
-------------------
- `Response.content` is now bytes-only. (*Backwards Incompatible*)
- New `Response.text` is unicode-only.
- If no `Response.encoding` is specified and `chardet` is available,
`Response.text` will guess an encoding.
- Default to ISO-8859-1 (Western) encoding for "text" subtypes.
- Removal of decode\_unicode. (*Backwards Incompatible*)
- New multiple-hooks system.
- New `Response.register_hook` for registering hooks within the
pipeline.
- `Response.url` is now Unicode.
0.9.3 (2012-01-18)
------------------
- SSL verify=False bugfix (apparent on windows machines).
0.9.2 (2012-01-18)
------------------
- Asynchronous async.send method.
- Support for proper chunk streams with boundaries.
- session argument for Session classes.
- Print entire hook tracebacks, not just exception instance.
- Fix response.iter\_lines from pending next line.
- Fix bug in HTTP-digest auth w/ URI having query strings.
- Fix in Event Hooks section.
- Urllib3 update.
0.9.1 (2012-01-06)
------------------
- danger\_mode for automatic Response.raise\_for\_status()
- Response.iter\_lines refactor
0.9.0 (2011-12-28)
------------------
- verify ssl is default.
0.8.9 (2011-12-28)
------------------
- Packaging fix.
0.8.8 (2011-12-28)
------------------
- SSL CERT VERIFICATION!
- Release of Certifi: Mozilla's cert list.
- New 'verify' argument for SSL requests.
- Urllib3 update.
0.8.7 (2011-12-24)
------------------
- iter\_lines last-line truncation fix
- Force safe\_mode for async requests
- Handle safe\_mode exceptions more consistently
- Fix iteration on null responses in safe\_mode
0.8.6 (2011-12-18)
------------------
- Socket timeout fixes.
- Proxy Authorization support.
0.8.5 (2011-12-14)
------------------
- Response.iter\_lines!
0.8.4 (2011-12-11)
------------------
- Prefetch bugfix.
- Added license to installed version.
0.8.3 (2011-11-27)
------------------
- Converted auth system to use simpler callable objects.
- New session parameter to API methods.
- Display full URL while logging.
0.8.2 (2011-11-19)
------------------
- New Unicode decoding system, based on over-ridable
Response.encoding.
- Proper URL slash-quote handling.
- Cookies with `[`, `]`, and `_` allowed.
0.8.1 (2011-11-15)
------------------
- URL Request path fix
- Proxy fix.
- Timeouts fix.
0.8.0 (2011-11-13)
------------------
- Keep-alive support!
- Complete removal of Urllib2
- Complete removal of Poster
- Complete removal of CookieJars
- New ConnectionError raising
- Safe\_mode for error catching
- prefetch parameter for request methods
- OPTION method
- Async pool size throttling
- File uploads send real names
- Vendored in urllib3
0.7.6 (2011-11-07)
------------------
- Digest authentication bugfix (attach query data to path)
0.7.5 (2011-11-04)
------------------
- Response.content = None if there was an invalid response.
- Redirection auth handling.
0.7.4 (2011-10-26)
------------------
- Session Hooks fix.
0.7.3 (2011-10-23)
------------------
- Digest Auth fix.
0.7.2 (2011-10-23)
------------------
- PATCH Fix.
0.7.1 (2011-10-23)
------------------
- Move away from urllib2 authentication handling.
- Fully Remove AuthManager, AuthObject, &c.
- New tuple-based auth system with handler callbacks.
0.7.0 (2011-10-22)
------------------
- Sessions are now the primary interface.
- Deprecated InvalidMethodException.
- PATCH fix.
- New config system (no more global settings).
0.6.6 (2011-10-19)
------------------
- Session parameter bugfix (params merging).
0.6.5 (2011-10-18)
------------------
- Offline (fast) test suite.
- Session dictionary argument merging.
0.6.4 (2011-10-13)
------------------
- Automatic decoding of unicode, based on HTTP Headers.
- New `decode_unicode` setting.
- Removal of `r.read/close` methods.
- New `r.faw` interface for advanced response usage.\*
- Automatic expansion of parameterized headers.
0.6.3 (2011-10-13)
------------------
- Beautiful `requests.async` module, for making async requests w/
gevent.
0.6.2 (2011-10-09)
------------------
- GET/HEAD obeys allow\_redirects=False.
0.6.1 (2011-08-20)
------------------
- Enhanced status codes experience `\o/`
- Set a maximum number of redirects (`settings.max_redirects`)
- Full Unicode URL support
- Support for protocol-less redirects.
- Allow for arbitrary request types.
- Bugfixes
0.6.0 (2011-08-17)
------------------
- New callback hook system
- New persistent sessions object and context manager
- Transparent Dict-cookie handling
- Status code reference object
- Removed Response.cached
- Added Response.request
- All args are kwargs
- Relative redirect support
- HTTPError handling improvements
- Improved https testing
- Bugfixes
0.5.1 (2011-07-23)
------------------
- International Domain Name Support!
- Access headers without fetching entire body (`read()`)
- Use lists as dicts for parameters
- Add Forced Basic Authentication
- Forced Basic is default authentication type
- `python-requests.org` default User-Agent header
- CaseInsensitiveDict lower-case caching
- Response.history bugfix
0.5.0 (2011-06-21)
------------------
- PATCH Support
- Support for Proxies
- HTTPBin Test Suite
- Redirect Fixes
- settings.verbose stream writing
- Querystrings for all methods
- URLErrors (Connection Refused, Timeout, Invalid URLs) are treated as
explicitly raised
`r.requests.get('hwe://blah'); r.raise_for_status()`
0.4.1 (2011-05-22)
------------------
- Improved Redirection Handling
- New 'allow\_redirects' param for following non-GET/HEAD Redirects
- Settings module refactoring
0.4.0 (2011-05-15)
------------------
- Response.history: list of redirected responses
- Case-Insensitive Header Dictionaries!
- Unicode URLs
0.3.4 (2011-05-14)
------------------
- Urllib2 HTTPAuthentication Recursion fix (Basic/Digest)
- Internal Refactor
- Bytes data upload Bugfix
0.3.3 (2011-05-12)
------------------
- Request timeouts
- Unicode url-encoded data
- Settings context manager and module
0.3.2 (2011-04-15)
------------------
- Automatic Decompression of GZip Encoded Content
- AutoAuth Support for Tupled HTTP Auth
0.3.1 (2011-04-01)
------------------
- Cookie Changes
- Response.read()
- Poster fix
0.3.0 (2011-02-25)
------------------
- Automatic Authentication API Change
- Smarter Query URL Parameterization
- Allow file uploads and POST data together
-
New Authentication Manager System
: - Simpler Basic HTTP System
- Supports all built-in urllib2 Auths
- Allows for custom Auth Handlers
0.2.4 (2011-02-19)
------------------
- Python 2.5 Support
- PyPy-c v1.4 Support
- Auto-Authentication tests
- Improved Request object constructor
0.2.3 (2011-02-15)
------------------
-
New HTTPHandling Methods
: - Response.\_\_nonzero\_\_ (false if bad HTTP Status)
- Response.ok (True if expected HTTP Status)
- Response.error (Logged HTTPError if bad HTTP Status)
- Response.raise\_for\_status() (Raises stored HTTPError)
0.2.2 (2011-02-14)
------------------
- Still handles request in the event of an HTTPError. (Issue \#2)
- Eventlet and Gevent Monkeypatch support.
- Cookie Support (Issue \#1)
0.2.1 (2011-02-14)
------------------
- Added file attribute to POST and PUT requests for multipart-encode
file uploads.
- Added Request.url attribute for context and redirects
0.2.0 (2011-02-14)
------------------
- Birth!
0.0.1 (2011-02-13)
------------------
- Frustration
- Conception
| /request3-2.19.0.tar.gz/request3-2.19.0/HISTORY.md | 0.829906 | 0.803444 | HISTORY.md | pypi |
# RequestInjector
scan a URL using one or more given wordlists with optional URL transformations
### What is RequestInjector?
This tool scans a single URL at a time, using wordlists to try various path combinations and key/value query pairs. RequestInjector is a single standalone script that can be kept in a tools folder until needed, or installed directly via pip and accessed directly from $PATH.
- in `path` mode (`-M path`), try all words against a URL path, with optional mutations
- given the URL "http://example.com/somepath/a/b/c", a wordlist to pull terms from, and -m/--mutate specified, worker threads will try each mutation of the URL and the current term (WORD):
- "http://example.com/WORD", "http://example.com/somepath/WORD", "http://example.com/somepath/a/WORD", "http://example.com/somepath/a/b/WORD", "http://example.com/somepath/a/b/c/WORD"
- in `arg` mode (`-M arg`), try all words against a specified set of keys
- using the `shotgun` attacktype (`-T shotgun`), provide a single wordlist against one or more keys (similar to Burp Suite's Intruder modes Sniper and Battering Ram)
- using the `trident` attacktype (`-T trident`), provide one wordlist per key, and terminate upon reaching either the end of the shortest wordlist (default) or the longest (`--longest --fillvalue VALUE`) (similar to Burp Suite's Intruder mode Pitchfork)
- in `body` mode (`-M body`), use a template to submit dynamic body content to a given target, utilizing either the `shotgun` or `trident` attacktype (also supports URL-based modes above)
- `body` is not yet implemented
### Installation [GitHub](https://github.com/bonifield/RequestInjector) [PyPi](https://pypi.org/project/requestinjector/)
```
pip install requestinjector
# will become available directly from $PATH as either "requestinjector" or "ri"
```
### Usage (Command Line Tool or Standalone Script Somewhere in $PATH)
```
v0.9.4
Last Updated: 2021-09-21
path mode (-M path):
# NOTE - although -w accepts a comma-separated list of wordlists as a string, only the first one will be used for this mode
requestinjector -u "http://example.com/somepath/a/b/c" \
-M path \
-w "/path/to/wordlist.txt" \
-t 10 \
-r 2 \
-m \
-p '{"http": "http://127.0.0.1:8080", "https": "https://127.0.0.1:8080"}' \
-H '{"Content-Type": "text/plain"}' \
--color
arg mode (-M arg) using shotgun attacktype (-T shotgun):
# NOTE - shotgun is similar to Burp Suite's sniper and battering ram modes; provide one or more keys, and a single wordlist
# NOTE - although -w accepts a comma-separated list of wordlists as a string, only the first one will be used for this attacktype
# NOTE - mutations (-m) not yet available for arg mode
requestinjector -u "http://example.com/somepath/a/b/c" \
-M arg \
-T shotgun \
-K key1,key2,key3,key4 \
-w "/path/to/wordlist.txt" \
-S statickey1=staticval1,statickey2=staticval2 \
-t 10 \
-r 2 \
-p '{"http": "http://127.0.0.1:8080", "https": "https://127.0.0.1:8080"}' \
-H '{"Content-Type": "text/plain"}' \
--color
arg mode (-M arg) using trident attacktype (-T trident), and optional static arguments (-S):
# NOTE - trident is similar to Burp Suite's pitchfork mode; for each key specified, provided a wordlist (-w WORDLIST1,WORDLIST2,etc); specify the same wordlist multiple times if using this attacktype and you want the same wordlist in multiple positions
# NOTE - this type will run through to the end of the shortest provided wordlist; use --longest and --fillvalue VALUE to run through the longest provided wordlist instead
# NOTE - mutations (-m) not yet available for arg mode
requestinjector -u "http://example.com/somepath/a/b/c" \
-M arg \
-T trident \
-K key1,key2,key3,key4 \
-w /path/to/wordlist1.txt,/path/to/wordlist2.txt,/path/to/wordlist3.txt,/path/to/wordlist4.txt \
-S statickey1=staticval1,statickey2=staticval2 \
-t 10 \
-r 2 \
-p '{"http": "http://127.0.0.1:8080", "https": "https://127.0.0.1:8080"}' \
-H '{"Content-Type": "text/plain"}' \
--color
arg mode (-M arg) using trident attacktype (-T trident), optional static arguments (-S), and --longest and --fillvalue VALUE (itertools.zip_longest())
# NOTE - trident is similar to Burp Suite's pitchfork mode; for each key specified, provided a wordlist (-w WORDLIST1,WORDLIST2,etc); specify the same wordlist multiple times if using this attacktype and you want the same wordlist in multiple positions
# NOTE - --longest and --fillvalue VALUE will run through to the end of the longest provided wordlist, filling empty values with the provided fillvalue
# NOTE - mutations (-m) not yet available for arg mode
requestinjector -u "http://example.com/somepath/a/b/c" \
-M arg \
-T trident \
-K key1,key2,key3,key4 \
-w /path/to/wordlist1.txt,/path/to/wordlist2.txt,/path/to/wordlist3.txt,/path/to/wordlist4.txt \
-S statickey1=staticval1,statickey2=staticval2 \
--longest \
--fillvalue "AAAA" \
-t 10 \
-r 2 \
-p '{"http": "http://127.0.0.1:8080", "https": "https://127.0.0.1:8080"}' \
-H '{"Content-Type": "text/plain"}' \
--color
output modes: full (default), --simple_output (just status code and full url), --color (same as simple_output but the status code is colorized)
additional options:
-d/--delay [FLOAT] = add a delay, per thread, as a float (default 0.0)
or import as a module (from requestinjector import RequestInjector)
```
### Usage (Importable Module)
```
from requestinjector import RequestInjector
proxy = {'http': 'http://127.0.0.1:8080', 'https': 'https://127.0.0.1:8080'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0', 'Accept': 'text/html'}
url = "http://example.com/somepath/a/b/c"
wordlist = ["/path/to/wordlist.txt"]
x = RequestInjector(url=url, wordlist=wordlist, threads=10, mutate_path=True, headers=headers, proxy=proxy, retries=1, staticargs="", injectkeys="", longest=None, fillvalue=None, simple_output=True)
x.run()
```
### Options (-h)
```
usage: requestinjector.py [-h] -u URL [-w WORDLIST] [-M MODE] [-H HEADERS]
[-p PROXY] [-r RETRIES] [-t THREADS] [-d DELAY] [-m]
[-T ATTACKTYPE] [--longest] [-F FILLVALUE]
[-S STATICARGS] [-K INJECTKEYS] [--color]
[--simple_output]
RequestInjector: scan a URL using a given wordlist with optional URL
transformations
optional arguments:
-h, --help show this help message and exit
required arguments:
-u URL, --url URL provide a URL to check
general arguments:
-w WORDLIST, --wordlist WORDLIST
provide a wordlist (file) location, or multiple comma-
separated files in a string, ex. -w
/home/user/words1.txt or -w
/home/user/words1.txt,/home/user/words2.txt, etc
-M MODE, --mode MODE provide a mode (path|arg|body(NYI)) (default path)
-H HEADERS, --headers HEADERS
provide a dictionary of headers to include, with
single-quotes wrapping the dictionary and double-
quotes wrapping the keys and values, ex. '{"Content-
Type": "application/json"}' (defaults to a Firefox
User-Agent and Accept: text/html) *note default is set
inside PathWorker class*
-p PROXY, --proxy PROXY
provide a dictionary of proxies to use, with single-
quotes wrapping the dictionary and double-quotes
wrapping the keys and values, ex. '{"http":
"http://127.0.0.1:8080", "https":
"https://127.0.0.1:8080"}'
-r RETRIES, --retries RETRIES
provide the number of times to retry a connection
(default 1)
-t THREADS, --threads THREADS
provide the number of threads for making requests
(default 10)
-d DELAY, --delay DELAY
provide a delay between requests, per thread, as a
float (default 0.0); use fewer threads and longer
delays if the goal is to be less noisy, although the
amount of requests will remain the same
-m, --mutate provide if mutations should be applied to the checked
URL+word (currently only supports path mode, arg mode
support nyi)
arg mode-specific arguments:
-T ATTACKTYPE, --attacktype ATTACKTYPE
provide an attack type (shotgun|trident); shotgun is
similar to Burp Suite's sniper and battering ram
modes, and trident is similar to pitchfork (default
shotgun)
--longest provide if you wish to fully exhaust the longest
wordlist using the trident attacktype, and not stop
when the end of shortest wordlist has been reached
(zip() vs itertools.zip_longest())
-F FILLVALUE, --fillvalue FILLVALUE
provide a string to use in null values when using
--longest with the trident attacktype (such as when
using two wordlists of differing lengths; the
fillvalue will be used when the shortest wordlist has
finished, but terms are still being used from the
longest wordlist)
-S STATICARGS, --staticargs STATICARGS
provide a string of static key=value pairs to include
in each request, appended to the end of the query, as
a comma-separated string, ex. key1=val1,key2=val2 etc
-K INJECTKEYS, --injectkeys INJECTKEYS
provide a string of keys to be used; using the shotgun
attacktype, each key will receive values from only the
first wordlist; using the trident attacktype, each key
must have a specific wordlist specified in the matching
position with the -w WORDLIST option; ex. '-T trident
-K user,account,sid -w
userwords.txt,accountids.txt,sids.txt'
output arguments:
--color provide if stdout should have colorized status codes
(will force simple_output format)
--simple_output provide for simplified output, just status code and
URL, ex. 200 http://example.com
```
### Example Output
```
# Standard Format
# Provided URL: http://example.com/somepath/exists
# Note the IP and port reflect the proxy being used; without a proxy, this will reflect the external address being scanned
status_code:404 bytes:12 word:contactus ip:127.0.0.1 port:8080 url:http://example.com/contactus
status_code:404 bytes:12 word:contactus ip:127.0.0.1 port:8080 url:http://example.com/somepath/contactus
status_code:200 bytes:411 word:contactus ip:127.0.0.1 port:8080 url:http://example.com/somepath/exists/contactus
status_code:404 bytes:12 word:admin ip:127.0.0.1 port:8080 url:http://example.com/admin
status_code:200 bytes:556 word:admin ip:127.0.0.1 port:8080 url:http://example.com/somepath/admin
status_code:200 bytes:556 word:admin ip:127.0.0.1 port:8080 url:http://example.com/somepath/exists/admin
# Simplified Format (simple_output)
404 http://example.com/contactus
404 http://example.com/somepath/contactus
200 http://example.com/somepath/exists/contactus
404 http://example.com/admin
200 http://example.com/somepath/admin
200 http://example.com/somepath/exists/admin
```
### TODO
- preview mode
- body mode, recursive grep, method select/switching
- logfile dump for every execution
- redirect history handling
- body POST/PUT objects using a config
- optional encodings and obfuscation of words/terms
- better output handling to support response body content, headers sent/received, etc
- move more logic out of Worker classes and into pre-processing/Filler and post-processing/Drainer classes
- jitter, rotating user agents, arg mode mutations (duplicate keys, re-order, null bytes, etc)
- "real timeout" (-R) to use with requests | /requestinjector-0.9.4.tar.gz/requestinjector-0.9.4/README.md | 0.662141 | 0.875574 | README.md | pypi |
from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
from requests_oauthlib import OAuth2
from requests_ntlm3 import HttpNtlmAuth
from requests_ntlm3 import NtlmCompatibility
from requests_kerberos import HTTPKerberosAuth, REQUIRED
def basic_auth(username: str, password: str) -> HTTPBasicAuth:
    """
    Build a ``HTTPBasicAuth`` handler suitable for the ``.RequestsAPI`` object.

    :param username: Name of the user to authenticate as.
    :param password: Password belonging to that user.
    :return: Configured ``HTTPBasicAuth`` instance.
    """
    return HTTPBasicAuth(username=username, password=password)
def digest_auth(username: str, password: str) -> HTTPDigestAuth:
    """
    Build a ``HTTPDigestAuth`` handler suitable for the ``.RequestsAPI`` object.

    :param username: Name of the user to authenticate as.
    :param password: Password belonging to that user.
    :return: Configured ``HTTPDigestAuth`` instance.
    """
    return HTTPDigestAuth(username=username, password=password)
def oauth2_auth(client_id: str, token: dict) -> OAuth2:
    """
    Creates an ``OAuth2`` object to provide the ``.RequestsAPI`` object for authentication.

    :param client_id: Client identifier obtained during registration with the
        OAuth2 provider. Note: per RFC 6749 this is a string, not an int
        (the previous ``int`` annotation was incorrect; runtime behavior is
        unchanged).
    :param token: Token dictionary; must include ``access_token`` and ``token_type``.
    :return: ``OAuth2`` object.
    """
    # The second positional argument (``client``) is left as None so
    # requests-oauthlib falls back to its default WebApplicationClient.
    return OAuth2(client_id, None, token)
def ntlm_auth(
    username: str,
    password: str,
    send_cbt: bool = False,
    ntlm_compatibility: int = NtlmCompatibility.NTLMv2_DEFAULT
) -> HttpNtlmAuth:
    """
    Build an ``HttpNtlmAuth`` handler suitable for the ``.RequestsAPI`` object.

    :param username: User in 'domain\\username' form.
    :param password: Password belonging to that user.
    :param send_cbt: Whether to send channel bindings over a HTTPS channel.
    :param ntlm_compatibility: Compatibility level used when building the
        NTLM authentication message.
    :return: Configured ``HttpNtlmAuth`` instance.
    """
    handler = HttpNtlmAuth(username, password, send_cbt, ntlm_compatibility)
    return handler
def kerberos_auth(
    mutual_authentication: int = REQUIRED,
    service: str = "HTTP",
    delegate: bool = False,
    force_preemptive: bool = False,
    principal: str = None,
    hostname_override: str = None,
    sanitize_mutual_error_response: bool = True,
    send_cbt: bool = True
) -> HTTPKerberosAuth:
    """
    Creates a ``HTTPKerberosAuth`` object to provide the ``.RequestsAPI`` object for authentication.
    **REQUIRES** a valid Ticket-Granting-Ticket (TGT).
    :param mutual_authentication: Enable mutual authentication?
    :param service: Service type for header.
    :param delegate: Enable credential delegation?
    :param force_preemptive: Force Kerberos GSS exchange prior to 401 Unauthorized response
    :param principal: Override the default principal
    :param hostname_override: Override hostname
    :param sanitize_mutual_error_response: Strip content and headers from error
        responses that fail mutual authentication
    :param send_cbt: Send channel binding tokens with the authentication request?
    :return: ``HTTPKerberosAuth`` object.
    """
    # All arguments are forwarded positionally, in the constructor's
    # declared order.
    return HTTPKerberosAuth(
        mutual_authentication,
        service,
        delegate,
        force_preemptive,
        principal,
        hostname_override,
        sanitize_mutual_error_response,
        send_cbt
    )
import json
from enum import Enum
from functools import wraps
from http import HTTPStatus
from itertools import zip_longest
from typing import Any, Dict, List, NamedTuple, Optional
from unittest import TestCase
import requests
from requests import Request
from responses import mock as response_mock
__all__ = ['RequestMock', 'IGNORED']
# Sentinel meaning "this field is not checked" on RequestMock attributes.
# Compared by identity (`is IGNORED`), so it never collides with real values.
IGNORED = object()
class AssertRequests:
    """
    Context manager / decorator that registers mocked HTTP responses (via the
    ``responses`` library) and, on exit, verifies that the requests actually
    made match the expected ``RequestMock`` list in both order and content.
    """

    def __init__(self, request_mocks: List['RequestMock'], test_case: Optional[TestCase] = None):
        """
        :param request_mocks: Expected requests (with their mocked responses), in order.
        :param test_case: ``TestCase`` used for assertions; filled in
            automatically when used as a test-method decorator.
        """
        self.request_mocks = request_mocks
        self.test_case = test_case

    def __call__(self, func):
        """Decorator usage: capture the decorated test method's ``self`` as the test case."""
        @wraps(func)
        def inner(*args, **kwargs):
            # args[0] is the TestCase instance of the decorated test method.
            self.test_case = args[0]
            with self:
                return func(*args, **kwargs)

        return inner

    def __enter__(self):
        # Register every expected response, then activate the `responses`
        # mock so outgoing HTTP calls are intercepted.
        for request_mock in self.request_mocks:
            response_mock.add(
                method=request_mock.request_method.value,
                url=request_mock.request_url,
                json=request_mock.response_json,
                status=request_mock.response_status_code,
            )
        response_mock.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self._check_requests(exc_val)
        finally:
            response_mock.__exit__(exc_type, exc_val, exc_tb)
        # `responses` raises ConnectionError for unexpected calls; suppress it
        # here because _check_requests already reported the mismatch.
        if exc_type is requests.exceptions.ConnectionError:
            return True

    def _check_requests(self, exc_val):
        """Compare expected mocks with recorded calls pairwise, reporting via subTest."""
        for request_mock, mocked_call in zip_longest(self.request_mocks, response_mock.calls):
            request_mock: RequestMock
            request: Optional[Request] = mocked_call.request if mocked_call else None
            if request_mock:
                sub_test_name = f'{request_mock.request_method.value} {request_mock.request_url}'
            else:
                sub_test_name = f'[Unexpected] {request.method} {request.url}'
            with self.test_case.subTest(sub_test_name):
                try:
                    self.test_case.assertIsNotNone(request, 'The pending request is missing!')
                    self.test_case.assertIsNotNone(request_mock, 'This request is unexpected!')
                    self._assert_urls_equals(request_mock.request_url, request.url)
                    self.test_case.assertEqual(request_mock.request_method.value, request.method, 'Wrong method!')
                    # Fields left as IGNORED are not validated.
                    if request_mock.request_json is not IGNORED:
                        self._assert_request_json_equal(request_mock.request_json, request.body)
                    if request_mock.request_body is not IGNORED:
                        self.test_case.assertEqual(request_mock.request_body, request.body, 'Wrong body!')
                    if request_mock.request_headers is not IGNORED:
                        self.test_case.assertEqual(request_mock.request_headers, request.headers, 'Wrong headers!')
                    if request_mock.request_headers_contains is not IGNORED:
                        # Compare only the subset of actual headers whose keys
                        # appear in the expected mapping.
                        self.test_case.assertEqual(request_mock.request_headers_contains,
                                                   {key: value
                                                    for key, value in request.headers.items()
                                                    if key in request_mock.request_headers_contains},
                                                   'Wrong headers!')
                except AssertionError as error:
                    # Chain onto the in-flight exception (if any) so neither
                    # failure is hidden.
                    if exc_val:
                        raise error from exc_val
                    raise

    def _assert_urls_equals(self, expected_url: str, actual_url: str):
        """Assert URL equality, tolerating a trailing slash added by `responses`."""
        if expected_url and actual_url and actual_url[-1] == '/' and expected_url[-1] != '/':
            expected_url += '/'
        self.test_case.assertEqual(expected_url, actual_url, 'Wrong URL!')

    def _assert_request_json_equal(self, expected_json, actual_body):
        """Assert that the request body parses as JSON equal to `expected_json`."""
        try:
            self.test_case.assertEqual(
                expected_json,
                json.loads(actual_body),
                'Wrong body!'
            )
        except json.decoder.JSONDecodeError as error:
            raise AssertionError('JSON is broken!') from error
class RequestMock(NamedTuple):
    # noinspection PyUnresolvedReferences
    """
    Used to mock response and validate that the requests happened in the right order with right data
    Usage example:
    >>> import requests
    >>>
    >>> def get_likes_on_post(username, password, post_id):
    ...     access_token = requests.post('http://my.site/login',
    ...         json={'username': username, 'password': password}).json()['access_token']
    ...     likes = requests.get(f'http://my.site/posts/{post_id}',
    ...         headers={'Accept': 'application/json',
    ...                  'Authorization': f'Bearer {access_token}'}).json()['likes']
    ...     return likes
    >>>
    >>> class TestGetLikesOnPost(TestCase):
    ...     @RequestMock.assert_requests([
    ...         RequestMock(
    ...             request_url='http://my.site/login',
    ...             request_json={'username': 'the name', 'password': 'the password'},
    ...             request_method=RequestMock.Method.POST,
    ...             response_json={"access_token": 'the-token'}
    ...         ),
    ...         RequestMock(
    ...             request_url='http://my.site/posts/3',
    ...             request_headers_contains={'Authorization': 'Bearer the-token'},
    ...             response_json={'name': 'The cool story', 'likes': 42}
    ...         )
    ...     ])
    ...     def test_get_likes_on_post(self):
    ...         self.assertEqual(42, get_likes_on_post('the name', 'the password', 3))
    >>>
    >>> TestGetLikesOnPost('test_get_likes_on_post').run()
    <unittest.result.TestResult run=1 errors=0 failures=0>
    """
    class Method(Enum):
        # HTTP verbs mapped to the constants expected by the `responses` mock.
        POST = response_mock.POST
        GET = response_mock.GET
        PUT = response_mock.PUT
        PATCH = response_mock.PATCH
        DELETE = response_mock.DELETE
        HEAD = response_mock.HEAD
        OPTIONS = response_mock.OPTIONS
    # Expected request; fields left as IGNORED are not validated.
    request_url: str  # exact URL the request must target
    request_method: Method = Method.GET  # expected HTTP method
    request_json: Any = IGNORED  # expected body, compared after JSON parsing
    request_body: str = IGNORED  # expected raw body, compared verbatim
    request_headers: Dict[str, Any] = IGNORED  # full expected header mapping
    request_headers_contains: Dict[str, Any] = IGNORED  # headers that must be present with these values
    # Mocked response returned to the caller.
    response_json: Any = None  # JSON payload the mock responds with
    response_status_code: HTTPStatus = HTTPStatus.OK  # HTTP status the mock responds with
    assert_requests = staticmethod(AssertRequests)  # can be used both as decorator or context manager
import asyncio
import http
import inspect
import io
import json
import queue
import threading
import types
import typing
from urllib.parse import unquote, urljoin, urlsplit
import requests
import http3
from .adapters import HTTPAdapter
from .sessions import Session
class _HeaderDict(requests.packages.urllib3._collections.HTTPHeaderDict):
    # Adapts urllib3's header dict to the `email.message.Message`-style
    # `get_all` interface expected by cookie-extraction code.
    def get_all(self, key: str, default: str) -> typing.List[str]:
        # `default` is accepted for interface compatibility but never used:
        # urllib3's `getheaders` already returns [] for missing keys.
        # (Return annotation corrected: getheaders yields a list, not a str.)
        return self.getheaders(key)
class _MockOriginalResponse:
    """
    We have to jump through some hoops to present the response as if
    it was made using urllib3.
    """
    def __init__(self, headers: typing.List[typing.Tuple[bytes, bytes]]) -> None:
        # `msg` mirrors the stdlib HTTPResponse attribute urllib3 reads:
        # a case-insensitive, multi-valued header mapping.
        self.msg = _HeaderDict(headers)
        self.closed = False
    def isclosed(self) -> bool:
        # Checked by urllib3 to decide whether the underlying connection
        # can be released.
        return self.closed
def _get_reason_phrase(status_code: int) -> str:
try:
return http.HTTPStatus(status_code).phrase
except ValueError:
return ""
class ASGIAdapter(HTTPAdapter):
    """
    Transport adapter that dispatches requests to an ASGI application
    in-process: it builds an ASGI scope from the prepared request, drives
    the app's receive/send cycle, and assembles a ``requests.Response``.
    """
    def __init__(self, app, suppress_exceptions: bool = False) -> None:
        # When suppress_exceptions is True, application crashes become a
        # plain 500 response instead of propagating to the caller.
        self.app = app
        self.suppress_exceptions = suppress_exceptions
    async def send(  # type: ignore
        self, request: requests.PreparedRequest, *args: typing.Any, **kwargs: typing.Any
    ) -> requests.Response:
        """Run one HTTP request/response cycle against the ASGI app."""
        scheme, netloc, path, query, fragment = urlsplit(request.url)  # type: ignore
        default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme]
        if ":" in netloc:
            host, port_string = netloc.split(":", 1)
            port = int(port_string)
        else:
            host = netloc
            port = default_port
        # Include the 'host' header (only if the caller did not set one;
        # the port is omitted when it matches the scheme's default).
        if "host" in request.headers:
            headers = []  # type: typing.List[typing.Tuple[bytes, bytes]]
        elif port == default_port:
            headers = [(b"host", host.encode())]
        else:
            headers = [(b"host", (f"{host}:{port}").encode())]
        # Include other request headers, lower-cased and byte-encoded per ASGI.
        headers += [
            (key.lower().encode(), value.encode())
            for key, value in request.headers.items()
        ]
        # ASGI HTTP connection scope handed to the application.
        scope = {
            "type": "http",
            "http_version": "1.1",
            "method": request.method,
            "path": unquote(path),
            "root_path": "",
            "scheme": scheme,
            "query_string": query.encode(),
            "headers": headers,
            "client": ["testclient", 50000],
            "server": [host, port],
            "extensions": {"http.response.template": {}},
        }
        async def receive():
            # ASGI "receive" callable: feeds the request body to the app.
            nonlocal request_complete, response_complete
            if request_complete:
                # Body fully delivered: wait for the response to finish,
                # then signal a client disconnect.
                while not response_complete:
                    await asyncio.sleep(0.0001)
                return {"type": "http.disconnect"}
            body = request.body
            if isinstance(body, str):
                body_bytes = body.encode("utf-8")  # type: bytes
            elif body is None:
                body_bytes = b""
            elif isinstance(body, types.GeneratorType):
                # Streaming upload: emit one chunk per receive() call until
                # the generator is exhausted.
                try:
                    chunk = body.send(None)
                    if isinstance(chunk, str):
                        chunk = chunk.encode("utf-8")
                    return {"type": "http.request", "body": chunk, "more_body": True}
                except StopIteration:
                    request_complete = True
                    return {"type": "http.request", "body": b""}
            else:
                body_bytes = body
            request_complete = True
            return {"type": "http.request", "body": body_bytes}
        async def send(message) -> None:
            # ASGI "send" callable: collects the app's response messages
            # into raw_kwargs / template / context.
            nonlocal raw_kwargs, response_started, response_complete, template, context
            if message["type"] == "http.response.start":
                assert (
                    not response_started
                ), 'Received multiple "http.response.start" messages.'
                raw_kwargs["status_code"] = message["status"]
                raw_kwargs["headers"] = message["headers"]
                response_started = True
            elif message["type"] == "http.response.body":
                assert (
                    response_started
                ), 'Received "http.response.body" without "http.response.start".'
                assert (
                    not response_complete
                ), 'Received "http.response.body" after response completed.'
                body = message.get("body", b"")
                more_body = message.get("more_body", False)
                # HEAD responses must not carry a body.
                if request.method != "HEAD":
                    raw_kwargs["content"] += body
                if not more_body:
                    response_complete = True
            elif message["type"] == "http.response.template":
                # Extension used by template frameworks so tests can inspect
                # which template/context produced the response.
                template = message["template"]
                context = message["context"]
        # State shared with the receive()/send() closures above.
        request_complete = False
        response_started = False
        response_complete = False
        raw_kwargs = {"content": b""}  # type: typing.Dict[str, typing.Any]
        template = None
        context = None
        try:
            await self.app(scope, receive, send)
        except BaseException as exc:
            if not self.suppress_exceptions:
                raise exc from None
        if not self.suppress_exceptions:
            assert response_started, "TestClient did not receive any response."
        elif not response_started:
            # App crashed before starting a response: synthesize a bare 500.
            raw_kwargs = {"status_code": 500, "headers": []}
        raw = http3.AsyncResponse(**raw_kwargs)
        response = self.build_response(request, raw)
        if template is not None:
            response.template = template
            response.context = context
        return response
class ASGISession(Session):
    """Session that talks to an ASGI application through an ``ASGIAdapter``.

    Relative request URLs are resolved against ``base_url`` so callers can
    address endpoints by path only.
    """

    def __init__(
        self,
        app,
        base_url: str = "http://mockserver",
        suppress_exceptions: bool = False,
    ) -> None:
        super().__init__()
        # One adapter instance handles both URL schemes.
        adapter = ASGIAdapter(app, suppress_exceptions=suppress_exceptions)
        for scheme in ("http://", "https://"):
            self.mount(scheme, adapter)
        self.headers.update({"user-agent": "testclient"})
        self.app = app
        self.base_url = base_url

    async def request(self, method, url, *args, **kwargs) -> requests.Response:
        """Issue a request, resolving *url* relative to ``base_url``."""
        absolute_url = urljoin(self.base_url, url)
        return await super().request(method, absolute_url, *args, **kwargs)
import base64
import datetime
import json
import logging
import os
import threading
from typing import Union

from requests_auth.errors import *
logger = logging.getLogger(__name__)
def _decode_base64(base64_encoded_string: str) -> str:
"""
Decode base64, padding being optional.
:param base64_encoded_string: Base64 data as an ASCII byte string
:returns: The decoded byte string.
"""
missing_padding = len(base64_encoded_string) % 4
if missing_padding != 0:
base64_encoded_string += "=" * (4 - missing_padding)
return base64.b64decode(base64_encoded_string).decode("unicode_escape")
def _is_expired(expiry: float, early_expiry: float) -> bool:
return (
datetime.datetime.utcfromtimestamp(expiry - early_expiry)
< datetime.datetime.utcnow()
)
def _to_expiry(expires_in: Union[int, str]) -> float:
expiry = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc
) + datetime.timedelta(seconds=int(expires_in))
return expiry.timestamp()
class TokenMemoryCache:
    """
    Class to manage tokens using memory storage.
    """
    def __init__(self):
        # Maps key -> (token, expiry) or (token, expiry, refresh_token).
        self.tokens = {}
        # Guards every read/write of self.tokens (and persistence hooks).
        self.forbid_concurrent_cache_access = threading.Lock()
        # Ensures only one thread fetches/refreshes a token at a time.
        self.forbid_concurrent_missing_token_function_call = threading.Lock()
    def _add_bearer_token(self, key: str, token: str):
        """
        Set the bearer token and save it
        :param key: key identifier of the token
        :param token: value
        :raise InvalidToken: In case token is invalid.
        :raise TokenExpiryNotProvided: In case expiry is not provided.
        """
        if not token:
            raise InvalidToken(token)
        # Token is expected to be a JWT: header.body.signature; the expiry is
        # read from the "exp" claim of the decoded body.
        header, body, other = token.split(".")
        body = json.loads(_decode_base64(body))
        expiry = body.get("exp")
        if not expiry:
            raise TokenExpiryNotProvided(expiry)
        self._add_token(key, token, expiry)
    def _add_access_token(
        self,
        key: str,
        token: str,
        expires_in: Union[int, str],
        refresh_token: str = None,
    ):
        """
        Set the bearer token and save it
        :param key: key identifier of the token
        :param token: value
        :param expires_in: Number of seconds before token expiry
        :param refresh_token: refresh token value
        :raise InvalidToken: In case token is invalid.
        """
        self._add_token(key, token, _to_expiry(expires_in), refresh_token)
    def _add_token(
        self, key: str, token: str, expiry: float, refresh_token: str = None
    ):
        """
        Set the bearer token and save it
        :param key: key identifier of the token
        :param token: value
        :param expiry: UTC timestamp of expiry
        :param refresh_token: refresh token value
        """
        with self.forbid_concurrent_cache_access:
            self.tokens[key] = token, expiry, refresh_token
            # Persist immediately so file-based subclasses stay in sync.
            self._save_tokens()
            logger.debug(
                f'Inserting token expiring on {datetime.datetime.utcfromtimestamp(expiry)} (UTC) with "{key}" key: {token}'
            )
    def get_token(
        self,
        key: str,
        *,
        early_expiry: float = 30.0,
        on_missing_token=None,
        on_expired_token=None,
        **on_missing_token_kwargs,
    ) -> str:
        """
        Return the bearer token.
        :param key: key identifier of the token
        :param early_expiry: As the time between the token extraction from cache and the token reception on server side
        might not higher than one second, on slow networks, token might be expired when received by the actual server,
        even if still valid when fetched.
        This is the number of seconds to subtract to the actual token expiry. Token will be considered as
        expired 30 seconds before real expiry by default.
        :param on_missing_token: function to call when token is expired or missing (returning token and expiry tuple)
        :param on_expired_token: function to call to refresh the token when it is expired
        :param on_missing_token_kwargs: arguments of the on_missing_token function (key-value arguments)
        :return: the token
        :raise AuthenticationFailed: in case token cannot be retrieved.
        """
        logger.debug(f'Retrieving token with "{key}" key.')
        refresh_token = None
        with self.forbid_concurrent_cache_access:
            self._load_tokens()
            if key in self.tokens:
                token = self.tokens[key]
                if len(token) == 2:  # No refresh token
                    bearer, expiry = token
                else:
                    bearer, expiry, refresh_token = token
                if _is_expired(expiry, early_expiry):
                    logger.debug(f'Authentication token with "{key}" key is expired.')
                    # Drop the expired entry; refresh_token (if any) is kept
                    # locally to attempt a refresh below.
                    del self.tokens[key]
                else:
                    logger.debug(
                        f"Using already received authentication, will expire on {datetime.datetime.utcfromtimestamp(expiry)} (UTC)."
                    )
                    return bearer
        # Cached token was expired: try to refresh it before requesting a new one.
        if refresh_token is not None and on_expired_token is not None:
            try:
                with self.forbid_concurrent_missing_token_function_call:
                    state, token, expires_in, refresh_token = on_expired_token(
                        refresh_token
                    )
                    self._add_access_token(state, token, expires_in, refresh_token)
                    logger.debug(f"Refreshed token with key {key}.")
                with self.forbid_concurrent_cache_access:
                    if state in self.tokens:
                        bearer, expiry, refresh_token = self.tokens[state]
                        logger.debug(
                            f"Using newly refreshed token, expiring on {datetime.datetime.utcfromtimestamp(expiry)} (UTC)."
                        )
                        return bearer
            except (InvalidGrantRequest, GrantNotProvided):
                # Refresh failed: fall through to requesting a brand new token.
                logger.debug(f"Failed to refresh token.")
        logger.debug("Token cannot be found in cache.")
        if on_missing_token is not None:
            with self.forbid_concurrent_missing_token_function_call:
                new_token = on_missing_token(**on_missing_token_kwargs)
                # The callback may return 2, 3 or 4 values depending on flow.
                if len(new_token) == 2:  # Bearer token
                    state, token = new_token
                    self._add_bearer_token(state, token)
                elif len(new_token) == 3:  # Access token
                    state, token, expires_in = new_token
                    self._add_access_token(state, token, expires_in)
                else:  # Access token and Refresh token
                    state, token, expires_in, refresh_token = new_token
                    self._add_access_token(state, token, expires_in, refresh_token)
                if key != state:
                    logger.warning(
                        f"Using a token received on another key than expected. Expecting {key} but was {state}."
                    )
            with self.forbid_concurrent_cache_access:
                if state in self.tokens:
                    bearer, expiry, refresh_token = self.tokens[state]
                    logger.debug(
                        f"Using newly received authentication, expiring on {datetime.datetime.utcfromtimestamp(expiry)} (UTC)."
                    )
                    return bearer
        logger.debug(
            f"User was not authenticated: key {key} cannot be found in {self.tokens}."
        )
        raise AuthenticationFailed()
    def clear(self):
        """Remove every cached token (memory and, via _clear, any persistence)."""
        with self.forbid_concurrent_cache_access:
            logger.debug("Clearing token cache.")
            self.tokens = {}
            self._clear()
    # Persistence hooks: no-ops here, overridden by file-based subclasses.
    def _save_tokens(self):
        pass
    def _load_tokens(self):
        pass
    def _clear(self):
        pass
class JsonTokenFileCache(TokenMemoryCache):
    """
    Class to manage tokens using a cache file.

    Tokens are persisted as JSON at ``tokens_path`` and transparently
    reloaded whenever the file is newer than the last known save.
    """

    def __init__(self, tokens_path: str):
        """
        :param tokens_path: Path of the JSON file used to persist tokens.
        """
        TokenMemoryCache.__init__(self)
        self.tokens_path = tokens_path
        self.last_save_time = 0
        self._load_tokens()

    def _clear(self):
        # Reset the timestamp so the next save is never considered stale.
        self.last_save_time = 0
        try:
            os.remove(self.tokens_path)
        except OSError:
            # Best effort: the file may not exist or may not be removable.
            # Narrowed from a bare "except:" which also swallowed
            # SystemExit/KeyboardInterrupt.
            logger.debug("Cannot remove tokens file.")

    def _save_tokens(self):
        try:
            with open(self.tokens_path, "w") as tokens_cache_file:
                json.dump(self.tokens, tokens_cache_file)
            self.last_save_time = os.path.getmtime(self.tokens_path)
        except Exception:
            # Persisting is best effort; the in-memory cache stays valid.
            logger.exception("Cannot save tokens.")

    def _load_tokens(self):
        if not os.path.exists(self.tokens_path):
            logger.debug("No token loaded. Token cache does not exists.")
            return
        try:
            last_modification_time = os.path.getmtime(self.tokens_path)
            # Only reload when the file changed since our last save/load.
            if last_modification_time > self.last_save_time:
                self.last_save_time = last_modification_time
                with open(self.tokens_path, "r") as tokens_cache_file:
                    self.tokens = json.load(tokens_cache_file)
        except Exception:
            # A corrupted or unreadable file must not break authentication.
            logger.exception("Cannot load tokens.")
from json import JSONDecodeError
from typing import Union
from requests import Response
class AuthenticationFailed(Exception):
    """Raised when the user could not be authenticated."""

    def __init__(self):
        super().__init__("User was not authenticated.")
class TimeoutOccurred(Exception):
    """Raised when no response was received within the timeout interval."""

    def __init__(self, timeout: float):
        message = f"User authentication was not received within {timeout} seconds."
        super().__init__(message)
class InvalidToken(Exception):
    """Raised when a token is invalid."""

    def __init__(self, token_name: str):
        super().__init__(f"{token_name} is invalid.")
class GrantNotProvided(Exception):
    """Raised when the expected grant is missing from the server response."""

    def __init__(self, grant_name: str, dictionary_without_grant: dict):
        message = f"{grant_name} not provided within {dictionary_without_grant}."
        super().__init__(message)
class InvalidGrantRequest(Exception):
    """
    If the request failed client authentication or is invalid, the authorization server returns an error response as described in https://tools.ietf.org/html/rfc6749#section-5.2
    """
    # Error code -> human readable description, quoted from the RFC.
    # https://tools.ietf.org/html/rfc6749#section-5.2
    request_errors = {
        "invalid_request": "The request is missing a required parameter, includes an unsupported parameter value (other than grant type), repeats a parameter, includes multiple credentials, utilizes more than one mechanism for authenticating the client, or is otherwise malformed.",
        "invalid_client": 'Client authentication failed (e.g., unknown client, no client authentication included, or unsupported authentication method). The authorization server MAY return an HTTP 401 (Unauthorized) status code to indicate which HTTP authentication schemes are supported. If the client attempted to authenticate via the "Authorization" request header field, the authorization server MUST respond with an HTTP 401 (Unauthorized) status code and include the "WWW-Authenticate" response header field matching the authentication scheme used by the client.',
        "invalid_grant": "The provided authorization grant (e.g., authorization code, resource owner credentials) or refresh token is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client.",
        "unauthorized_client": "The authenticated client is not authorized to use this authorization grant type.",
        "unsupported_grant_type": "The authorization grant type is not supported by the authorization server.",
        "invalid_scope": "The requested scope is invalid, unknown, malformed, or exceeds the scope granted by the resource owner.",
    }
    # Error codes returned via browser redirection (implicit / code flows).
    # https://tools.ietf.org/html/rfc6749#section-4.2.2.1
    # https://tools.ietf.org/html/rfc6749#section-4.1.2.1
    browser_errors = {
        "invalid_request": "The request is missing a required parameter, includes an invalid parameter value, includes a parameter more than once, or is otherwise malformed.",
        "unauthorized_client": "The client is not authorized to request an authorization code or an access token using this method.",
        "access_denied": "The resource owner or authorization server denied the request.",
        "unsupported_response_type": "The authorization server does not support obtaining an authorization code or an access token using this method.",
        "invalid_scope": "The requested scope is invalid, unknown, or malformed.",
        "server_error": "The authorization server encountered an unexpected condition that prevented it from fulfilling the request. (This error code is needed because a 500 Internal Server Error HTTP status code cannot be returned to the client via an HTTP redirect.)",
        "temporarily_unavailable": "The authorization server is currently unable to handle the request due to a temporary overloading or maintenance of the server. (This error code is needed because a 503 Service Unavailable HTTP status code cannot be returned to the client via an HTTP redirect.)",
    }
    def __init__(self, response: Union[Response, dict]):
        """
        :param response: Either an HTTP Response (token endpoint reply) or a
            dict of query/fragment parameters received via browser redirect.
        """
        Exception.__init__(self, InvalidGrantRequest.to_message(response))
    @staticmethod
    def to_message(response: Union[Response, dict]) -> str:
        """
        Handle response as described in:
        * https://tools.ietf.org/html/rfc6749#section-5.2
        * https://tools.ietf.org/html/rfc6749#section-4.1.2.1
        * https://tools.ietf.org/html/rfc6749#section-4.2.2.1
        """
        if isinstance(response, dict):
            # Parameters received via browser redirection.
            return InvalidGrantRequest.to_oauth2_message(
                response, InvalidGrantRequest.browser_errors
            )
        try:
            return InvalidGrantRequest.to_oauth2_message(
                response.json(), InvalidGrantRequest.request_errors
            )
        except JSONDecodeError:
            # Non-JSON error body: fall back to the raw response text.
            return response.text
    @staticmethod
    def to_oauth2_message(content: dict, errors: dict) -> str:
        """
        Handle content as described in:
        * https://tools.ietf.org/html/rfc6749#section-5.2
        * https://tools.ietf.org/html/rfc6749#section-4.1.2.1
        * https://tools.ietf.org/html/rfc6749#section-4.2.2.1
        """
        def _pop(key: str) -> str:
            # Redirect parameters may arrive as single-element lists
            # (urllib.parse.parse_qs style); unwrap the first value.
            value = content.pop(key, None)
            if value and isinstance(value, list):
                value = value[0]
            return value
        if "error" in content:
            error = _pop("error")
            # Prefer the server-provided description, else the RFC wording.
            error_description = _pop("error_description") or errors.get(error)
            message = f"{error}: {error_description}"
            if "error_uri" in content:
                message += f"\nMore information can be found on {_pop('error_uri')}"
            if content:
                message += f"\nAdditional information: {content}"
        else:
            message = f"{content}"
        return message
class StateNotProvided(Exception):
    """Raised when the state is missing from the received parameters."""

    def __init__(self, dictionary_without_state: dict):
        super().__init__(f"state not provided within {dictionary_without_state}.")
class TokenExpiryNotProvided(Exception):
    """Raised when the token body does not carry an expiry (exp) claim."""

    def __init__(self, token_body: dict):
        super().__init__(f"Expiry (exp) is not provided in {token_body}.")
import base64
import os
import uuid
from hashlib import sha256, sha512
from urllib.parse import parse_qs, urlsplit, urlunsplit, urlencode
from typing import Optional
import requests
import requests.auth
import warnings
from requests_auth import oauth2_authentication_responses_server, oauth2_tokens
from requests_auth.errors import InvalidGrantRequest, GrantNotProvided
def _add_parameters(initial_url: str, extra_parameters: dict) -> str:
"""
Add parameters to an URL and return the new URL.
:param initial_url:
:param extra_parameters: dictionary of parameters name and value.
:return: the new URL containing parameters.
"""
scheme, netloc, path, query_string, fragment = urlsplit(initial_url)
query_params = parse_qs(query_string)
query_params.update(
{
parameter_name: [parameter_value]
for parameter_name, parameter_value in extra_parameters.items()
}
)
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
def _pop_parameter(url: str, query_parameter_name: str) -> (str, Optional[str]):
"""
Remove and return parameter of an URL.
:param url: The URL containing (or not) the parameter.
:param query_parameter_name: The query parameter to pop.
:return: The new URL (without this parameter) and the parameter value (None if not found).
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
parameter_value = query_params.pop(query_parameter_name, None)
new_query_string = urlencode(query_params, doseq=True)
return (
urlunsplit((scheme, netloc, path, new_query_string, fragment)),
parameter_value,
)
def _get_query_parameter(url: str, param_name: str) -> Optional[str]:
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
all_values = query_params.get(param_name)
return all_values[0] if all_values else None
def request_new_grant_with_post(
    url: str, data, grant_name: str, timeout: float, session: requests.Session
) -> (str, int, str):
    """
    POST *data* to the token endpoint and extract the requested grant.

    :param url: Token URL.
    :param data: Body parameters sent to the token URL.
    :param grant_name: Name of the response field holding the grant.
    :param timeout: Maximum amount of seconds to wait for the response.
    :param session: requests.Session used to perform the POST.
    :return: Tuple of (grant, expires_in, refresh_token); the last two are
        None when absent from the response.
    :raise InvalidGrantRequest: when the server replied with an error status.
    :raise GrantNotProvided: when the grant is missing from the response.
    """
    with session:
        response = session.post(url, data=data, timeout=timeout)
        # A falsy Response means a 4xx/5xx status code,
        # as described in https://tools.ietf.org/html/rfc6749#section-5.2
        if not response:
            raise InvalidGrantRequest(response)
        content = response.json()
        grant = content.get(grant_name)
        if not grant:
            raise GrantNotProvided(grant_name, content)
        return grant, content.get("expires_in"), content.get("refresh_token")
class OAuth2:
    # Token cache shared by every OAuth 2 authentication class in this module:
    # each flow calls OAuth2.token_cache.get_token with its state as the key.
    token_cache = oauth2_tokens.TokenMemoryCache()
class SupportMultiAuth:
    """Inherit from this class to be able to use your class with requests_auth provided authentication classes."""

    def _combine_with(self, other):
        # Flatten the other side when it already aggregates several modes, so
        # the result stays a flat _MultiAuth of authentication modes.
        if isinstance(other, _MultiAuth):
            return _MultiAuth(self, *other.authentication_modes)
        return _MultiAuth(self, other)

    def __add__(self, other):
        """Combine with another authentication via the ``+`` operator."""
        return self._combine_with(other)

    def __and__(self, other):
        """Combine with another authentication via the ``&`` operator."""
        return self._combine_with(other)
class BrowserAuth:
    def __init__(self, kwargs):
        """
        Consume browser-related settings from *kwargs* (values are popped).

        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 code will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a code or a token to be received once requested.
        Wait for 1 minute (60 seconds) by default.
        :param success_display_time: In case a code is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received code is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        """
        endpoint = kwargs.pop("redirect_uri_endpoint", None) or ""
        port = kwargs.pop("redirect_uri_port", None) or 5000
        self.redirect_uri_port = int(port)
        self.redirect_uri = (
            f"http://localhost:{self.redirect_uri_port}/{endpoint}"
        )
        # Timeout is expressed in seconds; display times in milliseconds.
        self.timeout = float(kwargs.pop("timeout", None) or 60)
        self.success_display_time = int(kwargs.pop("success_display_time", None) or 1)
        self.failure_display_time = int(
            kwargs.pop("failure_display_time", None) or 5000
        )
class OAuth2ResourceOwnerPasswordCredentials(requests.auth.AuthBase, SupportMultiAuth):
    """
    Resource Owner Password Credentials Grant
    Describes an OAuth 2 resource owner password credentials (also called password) flow requests authentication.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.3
    """
    def __init__(self, token_url: str, username: str, password: str, **kwargs):
        """
        :param token_url: OAuth 2 token URL.
        :param username: Resource owner user name.
        :param password: Resource owner password.
        :param session_auth: Client authentication if the client type is confidential
        or the client was issued client credentials (or assigned other authentication requirements).
        Can be a tuple or any requests authentication class instance.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes. Not sent by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: all additional authorization parameters that should be put as body parameters in the token URL.
        """
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        self.username = username
        if not self.username:
            raise Exception("User name is mandatory.")
        self.password = password
        if not self.password:
            raise Exception("Password is mandatory.")
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = kwargs.pop("token_field_name", None) or "access_token"
        self.early_expiry = float(kwargs.pop("early_expiry", None) or 30.0)
        # Time is expressed in seconds
        self.timeout = int(kwargs.pop("timeout", None) or 60)
        self.session = kwargs.pop("session", None) or requests.Session()
        session_auth = kwargs.pop("session_auth", None)
        if session_auth:
            self.session.auth = session_auth
        # As described in https://tools.ietf.org/html/rfc6749#section-4.3.2
        self.data = {
            "grant_type": "password",
            "username": self.username,
            "password": self.password,
        }
        scope = kwargs.pop("scope", None)
        if scope:
            self.data["scope"] = " ".join(scope) if isinstance(scope, list) else scope
        self.data.update(kwargs)
        # As described in https://tools.ietf.org/html/rfc6749#section-6
        self.refresh_data = {"grant_type": "refresh_token"}
        if scope:
            self.refresh_data["scope"] = self.data["scope"]
        self.refresh_data.update(kwargs)
        # Cache key: hash of the full token request, so identical
        # configurations share the same cached token.
        all_parameters_in_url = _add_parameters(self.token_url, self.data)
        self.state = sha512(all_parameters_in_url.encode("unicode_escape")).hexdigest()
    def __call__(self, r):
        # requests auth hook: fetch (or reuse) the token and inject the header.
        token = OAuth2.token_cache.get_token(
            key=self.state,
            early_expiry=self.early_expiry,
            on_missing_token=self.request_new_token,
            on_expired_token=self.refresh_token,
        )
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self):
        """Request a brand new token from the token endpoint."""
        # As described in https://tools.ietf.org/html/rfc6749#section-4.3.3
        token, expires_in, refresh_token = request_new_grant_with_post(
            self.token_url,
            self.data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        # Handle both Access and Bearer tokens
        return (
            (self.state, token, expires_in, refresh_token)
            if expires_in
            else (self.state, token)
        )
    def refresh_token(self, refresh_token: str):
        """Exchange *refresh_token* for a new access token."""
        # As described in https://tools.ietf.org/html/rfc6749#section-6
        self.refresh_data["refresh_token"] = refresh_token
        token, expires_in, refresh_token = request_new_grant_with_post(
            self.token_url,
            self.refresh_data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        return self.state, token, expires_in, refresh_token
class OAuth2ClientCredentials(requests.auth.AuthBase, SupportMultiAuth):
    """
    Client Credentials Grant
    Describes an OAuth 2 client credentials (also called application) flow requests authentication.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.4
    """
    def __init__(self, token_url: str, client_id: str, client_secret: str, **kwargs):
        """
        :param token_url: OAuth 2 token URL.
        :param client_id: Resource owner user name.
        :param client_secret: Resource owner password.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes. Not sent by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: all additional authorization parameters that should be put as query parameter in the token URL.
        """
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        self.client_id = client_id
        if not self.client_id:
            raise Exception("client_id is mandatory.")
        self.client_secret = client_secret
        if not self.client_secret:
            raise Exception("client_secret is mandatory.")
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = kwargs.pop("token_field_name", None) or "access_token"
        self.early_expiry = float(kwargs.pop("early_expiry", None) or 30.0)
        # Time is expressed in seconds
        self.timeout = int(kwargs.pop("timeout", None) or 60)
        self.session = kwargs.pop("session", None) or requests.Session()
        # Client credentials are sent via HTTP Basic authentication.
        self.session.auth = (self.client_id, self.client_secret)
        # As described in https://tools.ietf.org/html/rfc6749#section-4.4.2
        self.data = {"grant_type": "client_credentials"}
        scope = kwargs.pop("scope", None)
        if scope:
            self.data["scope"] = " ".join(scope) if isinstance(scope, list) else scope
        self.data.update(kwargs)
        # Refresh tokens are not supported, as described in https://tools.ietf.org/html/rfc6749#section-4.4.3
        # Cache key: hash of the full token request, so identical
        # configurations share the same cached token.
        all_parameters_in_url = _add_parameters(self.token_url, self.data)
        self.state = sha512(all_parameters_in_url.encode("unicode_escape")).hexdigest()
    def __call__(self, r):
        # requests auth hook: fetch (or reuse) the token and inject the header.
        token = OAuth2.token_cache.get_token(
            key=self.state,
            early_expiry=self.early_expiry,
            on_missing_token=self.request_new_token,
        )
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self) -> tuple:
        """Request a brand new token from the token endpoint."""
        # As described in https://tools.ietf.org/html/rfc6749#section-4.4.3
        token, expires_in, _ = request_new_grant_with_post(
            self.token_url,
            self.data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        # Handle both Access and Bearer tokens
        return (self.state, token, expires_in) if expires_in else (self.state, token)
class OAuth2AuthorizationCode(requests.auth.AuthBase, SupportMultiAuth, BrowserAuth):
    """
    Authorization Code Grant
    Describes an OAuth 2 authorization code (also called access code) flow requests authentication.
    Request a code with client browser, then request a token using this code.
    Store the token and use it for subsequent valid requests.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.1
    """
    def __init__(self, authorization_url: str, token_url: str, **kwargs):
        """
        :param authorization_url: OAuth 2 authorization URL.
        :param token_url: OAuth 2 token URL.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 code will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a code or a token to be received once requested.
        Wait for 1 minute by default.
        :param success_display_time: In case a code is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received code is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param response_type: Value of the response_type query parameter if not already provided in authorization URL.
        code by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param code_field_name: Field name containing the code. code by default.
        :param username: User name in case basic authentication should be used to retrieve token.
        :param password: User password in case basic authentication should be used to retrieve token.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL and as body parameters in the token URL.
        Usual parameters are:
        * client_id: Corresponding to your Application ID (in Microsoft Azure app portal)
        * client_secret: If client is not authenticated with the authorization server
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        """
        self.authorization_url = authorization_url
        if not self.authorization_url:
            raise Exception("Authorization URL is mandatory.")
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        # Consumes the browser-related kwargs (redirect URI, timeouts, ...).
        BrowserAuth.__init__(self, kwargs)
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = kwargs.pop("token_field_name", None) or "access_token"
        self.early_expiry = float(kwargs.pop("early_expiry", None) or 30.0)
        username = kwargs.pop("username", None)
        password = kwargs.pop("password", None)
        self.auth = (username, password) if username and password else None
        self.session = kwargs.pop("session", None) or requests.Session()
        self.session.auth = self.auth
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.2
        code_field_name = kwargs.pop("code_field_name", "code")
        if _get_query_parameter(self.authorization_url, "response_type"):
            # Ensure provided value will not be overridden
            kwargs.pop("response_type", None)
        else:
            # As described in https://tools.ietf.org/html/rfc6749#section-4.1.1
            kwargs.setdefault("response_type", "code")
        # The nonce is excluded from the state hash so that the cache key
        # stays stable across authorization requests.
        authorization_url_without_nonce = _add_parameters(
            self.authorization_url, kwargs
        )
        authorization_url_without_nonce, nonce = _pop_parameter(
            authorization_url_without_nonce, "nonce"
        )
        self.state = sha512(
            authorization_url_without_nonce.encode("unicode_escape")
        ).hexdigest()
        custom_code_parameters = {
            "state": self.state,
            "redirect_uri": self.redirect_uri,
        }
        if nonce:
            custom_code_parameters["nonce"] = nonce
        code_grant_url = _add_parameters(
            authorization_url_without_nonce, custom_code_parameters
        )
        # Details for the local HTTP server that waits for the browser redirect.
        self.code_grant_details = oauth2_authentication_responses_server.GrantDetails(
            code_grant_url,
            code_field_name,
            self.timeout,
            self.success_display_time,
            self.failure_display_time,
            self.redirect_uri_port,
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        self.token_data = {
            "grant_type": "authorization_code",
            "redirect_uri": self.redirect_uri,
        }
        self.token_data.update(kwargs)
        # As described in https://tools.ietf.org/html/rfc6749#section-6
        self.refresh_data = {"grant_type": "refresh_token"}
        self.refresh_data.update(kwargs)
    def __call__(self, r):
        # requests auth hook: fetch (or reuse) the token and inject the header.
        token = OAuth2.token_cache.get_token(
            key=self.state,
            early_expiry=self.early_expiry,
            on_missing_token=self.request_new_token,
            on_expired_token=self.refresh_token,
        )
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self):
        """Obtain a code via the browser, then exchange it for a token."""
        # Request code
        state, code = oauth2_authentication_responses_server.request_new_grant(
            self.code_grant_details
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        self.token_data["code"] = code
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.4
        token, expires_in, refresh_token = request_new_grant_with_post(
            self.token_url,
            self.token_data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        # Handle both Access and Bearer tokens
        return (
            (self.state, token, expires_in, refresh_token)
            if expires_in
            else (self.state, token)
        )
    def refresh_token(self, refresh_token: str):
        """Exchange *refresh_token* for a new access token."""
        # As described in https://tools.ietf.org/html/rfc6749#section-6
        self.refresh_data["refresh_token"] = refresh_token
        token, expires_in, refresh_token = request_new_grant_with_post(
            self.token_url,
            self.refresh_data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        return self.state, token, expires_in, refresh_token
class OAuth2AuthorizationCodePKCE(
    requests.auth.AuthBase, SupportMultiAuth, BrowserAuth
):
    """
    Proof Key for Code Exchange
    Describes an OAuth 2 Proof Key for Code Exchange (PKCE) flow requests authentication.
    Request a code with client browser, then request a token using this code.
    Store the token and use it for subsequent valid requests.
    More details can be found in https://tools.ietf.org/html/rfc7636
    """
    def __init__(self, authorization_url: str, token_url: str, **kwargs):
        """
        :param authorization_url: OAuth 2 authorization URL.
        :param token_url: OAuth 2 token URL.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 code will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a code or a token to be received once requested.
        Wait for 1 minute by default.
        :param success_display_time: In case a code is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received code is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param response_type: Value of the response_type query parameter if not already provided in authorization URL.
        code by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param code_field_name: Field name containing the code. code by default.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL and as body parameters in the token URL.
        Usual parameters are:
        * client_id: Corresponding to your Application ID (in Microsoft Azure app portal)
        * client_secret: If client is not authenticated with the authorization server
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        """
        self.authorization_url = authorization_url
        if not self.authorization_url:
            raise Exception("Authorization URL is mandatory.")
        self.token_url = token_url
        if not self.token_url:
            raise Exception("Token URL is mandatory.")
        # Pops browser-related kwargs (redirect_uri_*, timeout, display times) and
        # sets the corresponding attributes (e.g. self.timeout, self.redirect_uri).
        BrowserAuth.__init__(self, kwargs)
        self.session = kwargs.pop("session", None) or requests.Session()
        # NOTE(review): `timeout` is not a standard requests.Session attribute; it is
        # stored here so the helper requesting the token can read it back.
        self.session.timeout = self.timeout
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        self.token_field_name = kwargs.pop("token_field_name", None) or "access_token"
        self.early_expiry = float(kwargs.pop("early_expiry", None) or 30.0)
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.2
        code_field_name = kwargs.pop("code_field_name", "code")
        # A response_type already present in the URL wins over the kwargs one.
        authorization_url_without_response_type, response_type = _pop_parameter(
            self.authorization_url, "response_type"
        )
        if response_type:
            # Ensure provided value will not be overridden
            kwargs["response_type"] = response_type
        else:
            # As described in https://tools.ietf.org/html/rfc6749#section-4.1.1
            kwargs.setdefault("response_type", "code")
        authorization_url_without_nonce = _add_parameters(
            authorization_url_without_response_type, kwargs
        )
        authorization_url_without_nonce, nonce = _pop_parameter(
            authorization_url_without_nonce, "nonce"
        )
        # The state doubles as the token-cache key: hashing the nonce-free
        # authorization URL means identical configurations share a cached token.
        self.state = sha512(
            authorization_url_without_nonce.encode("unicode_escape")
        ).hexdigest()
        custom_code_parameters = {
            "state": self.state,
            "redirect_uri": self.redirect_uri,
        }
        if nonce:
            custom_code_parameters["nonce"] = nonce
        # generate PKCE code verifier and challenge
        code_verifier = self.generate_code_verifier()
        code_challenge = self.generate_code_challenge(code_verifier)
        # add code challenge parameters to the authorization_url request
        custom_code_parameters["code_challenge"] = code_challenge
        custom_code_parameters["code_challenge_method"] = "S256"
        code_grant_url = _add_parameters(
            authorization_url_without_nonce, custom_code_parameters
        )
        self.code_grant_details = oauth2_authentication_responses_server.GrantDetails(
            code_grant_url,
            code_field_name,
            self.timeout,
            self.success_display_time,
            self.failure_display_time,
            self.redirect_uri_port,
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        # include the PKCE code verifier used in the second part of the flow
        self.token_data = {
            "code_verifier": code_verifier,
            "grant_type": "authorization_code",
            "redirect_uri": self.redirect_uri,
        }
        self.token_data.update(kwargs)
        # As described in https://tools.ietf.org/html/rfc6749#section-6
        self.refresh_data = {"grant_type": "refresh_token"}
        self.refresh_data.update(kwargs)
    def __call__(self, r):
        # Attach a cached token (fetched/refreshed on demand) to the request headers.
        token = OAuth2.token_cache.get_token(
            key=self.state,
            early_expiry=self.early_expiry,
            on_missing_token=self.request_new_token,
            on_expired_token=self.refresh_token,
        )
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
    def request_new_token(self) -> tuple:
        """Run the PKCE flow: browser code grant, then code-for-token exchange."""
        # Request code
        state, code = oauth2_authentication_responses_server.request_new_grant(
            self.code_grant_details
        )
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.3
        self.token_data["code"] = code
        # As described in https://tools.ietf.org/html/rfc6749#section-4.1.4
        token, expires_in, refresh_token = request_new_grant_with_post(
            self.token_url,
            self.token_data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        # Handle both Access and Bearer tokens
        return (
            (self.state, token, expires_in, refresh_token)
            if expires_in
            else (self.state, token)
        )
    def refresh_token(self, refresh_token: str):
        """Trade a refresh token for a new access token."""
        # As described in https://tools.ietf.org/html/rfc6749#section-6
        self.refresh_data["refresh_token"] = refresh_token
        token, expires_in, refresh_token = request_new_grant_with_post(
            self.token_url,
            self.refresh_data,
            self.token_field_name,
            self.timeout,
            self.session,
        )
        return self.state, token, expires_in, refresh_token
    @staticmethod
    def generate_code_verifier() -> bytes:
        """
        Source: https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py
        Generates a 'code_verifier' as described in section 4.1 of RFC 7636.
        This is a 'high-entropy cryptographic random string' that will be
        impractical for an attacker to guess.
        https://tools.ietf.org/html/rfc7636#section-4.1
        :return: urlsafe base64-encoded random data.
        """
        # 64 random bytes -> 86 base64 characters, within RFC 7636's 43-128 range.
        return base64.urlsafe_b64encode(os.urandom(64)).rstrip(b"=")
    @staticmethod
    def generate_code_challenge(verifier: bytes) -> bytes:
        """
        Source: https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py
        Creates a 'code_challenge' as described in section 4.2 of RFC 7636
        by taking the sha256 hash of the verifier and then urlsafe
        base64-encoding it.
        https://tools.ietf.org/html/rfc7636#section-4.1
        :param verifier: code_verifier as generated by generate_code_verifier()
        :return: urlsafe base64-encoded sha256 hash digest, without '=' padding.
        """
        digest = sha256(verifier).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=")
class OAuth2Implicit(requests.auth.AuthBase, SupportMultiAuth, BrowserAuth):
    """
    Implicit Grant
    Describes an OAuth 2 implicit flow requests authentication.
    Request a token with client browser.
    Store the token and use it for subsequent valid requests.
    More details can be found in https://tools.ietf.org/html/rfc6749#section-4.2
    """
    def __init__(self, authorization_url: str, **kwargs):
        """
        :param authorization_url: OAuth 2 authorization URL.
        :param response_type: Value of the response_type query parameter if not already provided in authorization URL.
        token by default.
        :param token_field_name: Name of the expected field containing the token.
        id_token by default if response_type is id_token, else access_token.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root).
        :param redirect_uri_port: The port on which the server listening for the OAuth 2 token will be started.
        Listen on port 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param success_display_time: In case a token is successfully received,
        this is the maximum amount of milliseconds the success page will be displayed in your browser.
        Display the page for 1 millisecond by default.
        :param failure_display_time: In case received token is not valid,
        this is the maximum amount of milliseconds the failure page will be displayed in your browser.
        Display the page for 5 seconds by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param kwargs: all additional authorization parameters that should be put as query parameter
        in the authorization URL.
        Usual parameters are:
        * client_id: Corresponding to your Application ID (in Microsoft Azure app portal)
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        * prompt: none to avoid prompting the user if a session is already opened.
        """
        self.authorization_url = authorization_url
        if not self.authorization_url:
            raise Exception("Authorization URL is mandatory.")
        # Pops browser-related kwargs (redirect_uri_*, timeout, display times) and
        # sets the corresponding attributes (e.g. self.timeout, self.redirect_uri).
        BrowserAuth.__init__(self, kwargs)
        self.header_name = kwargs.pop("header_name", None) or "Authorization"
        self.header_value = kwargs.pop("header_value", None) or "Bearer {token}"
        if "{token}" not in self.header_value:
            raise Exception("header_value parameter must contains {token}.")
        # A response_type already present in the URL wins over the kwargs one.
        response_type = _get_query_parameter(self.authorization_url, "response_type")
        if response_type:
            # Ensure provided value will not be overridden
            kwargs.pop("response_type", None)
        else:
            # As described in https://tools.ietf.org/html/rfc6749#section-4.2.1
            response_type = kwargs.setdefault("response_type", "token")
        # As described in https://tools.ietf.org/html/rfc6749#section-4.2.2
        token_field_name = kwargs.pop("token_field_name", None)
        if not token_field_name:
            # OpenID Connect id_token flows return the token under "id_token".
            token_field_name = (
                "id_token" if "id_token" == response_type else "access_token"
            )
        self.early_expiry = float(kwargs.pop("early_expiry", None) or 30.0)
        authorization_url_without_nonce = _add_parameters(
            self.authorization_url, kwargs
        )
        authorization_url_without_nonce, nonce = _pop_parameter(
            authorization_url_without_nonce, "nonce"
        )
        # The state doubles as the token-cache key: hashing the nonce-free
        # authorization URL means identical configurations share a cached token.
        self.state = sha512(
            authorization_url_without_nonce.encode("unicode_escape")
        ).hexdigest()
        custom_parameters = {"state": self.state, "redirect_uri": self.redirect_uri}
        if nonce:
            custom_parameters["nonce"] = nonce
        grant_url = _add_parameters(authorization_url_without_nonce, custom_parameters)
        self.grant_details = oauth2_authentication_responses_server.GrantDetails(
            grant_url,
            token_field_name,
            self.timeout,
            self.success_display_time,
            self.failure_display_time,
            self.redirect_uri_port,
        )
    def __call__(self, r):
        # Attach a cached token to the request; on a cache miss the token is
        # requested directly through the user's browser (no refresh in implicit flow).
        token = OAuth2.token_cache.get_token(
            key=self.state,
            early_expiry=self.early_expiry,
            on_missing_token=oauth2_authentication_responses_server.request_new_grant,
            grant_details=self.grant_details,
        )
        r.headers[self.header_name] = self.header_value.format(token=token)
        return r
class AzureActiveDirectoryImplicit(OAuth2Implicit):
    """
    Describes an Azure Active Directory (OAuth 2) "Access Token" requests authentication.
    https://docs.microsoft.com/en-us/azure/active-directory/develop/access-tokens
    """
    def __init__(self, tenant_id: str, client_id: str, **kwargs):
        """
        :param tenant_id: Microsoft Tenant Identifier (Universal Unique Identifier).
        :param client_id: Microsoft Application Identifier (Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. token by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual expiry at which the token is considered
        expired. 30 seconds by default; set to 0 to deactivate and use the token until actual expiry.
        :param nonce: OpenID nonce (UUID), see http://openid.net/specs/openid-connect-core-1_0.html#IDToken.
        A newly generated UUID by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum milliseconds the success page stays displayed in the browser.
        1 millisecond by default.
        :param failure_display_time: Maximum milliseconds the failure page stays displayed in the browser.
        5 seconds by default.
        :param header_name: Header field used to send the token. Authorization by default.
        :param header_value: Token format; "{token}" must be present. "Bearer {token}" by default.
        :param kwargs: Additional authorization parameters sent as query parameters in the
        authorization URL (e.g. prompt=none to avoid prompting when a session is already opened).
        """
        # Supply a random nonce unless the caller provided one.
        nonce = kwargs.pop("nonce", None) or str(uuid.uuid4())
        super().__init__(
            f"https://login.microsoftonline.com/{tenant_id}/oauth2/authorize",
            client_id=client_id,
            nonce=nonce,
            **kwargs,
        )
class AzureActiveDirectoryImplicitIdToken(OAuth2Implicit):
    """
    Describes an Azure Active Directory (OpenID Connect) "ID Token" requests authentication.
    https://docs.microsoft.com/en-us/azure/active-directory/develop/id-tokens
    """
    def __init__(self, tenant_id: str, client_id: str, **kwargs):
        """
        :param tenant_id: Microsoft Tenant Identifier (Universal Unique Identifier).
        :param client_id: Microsoft Application Identifier (Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. id_token by default.
        :param token_field_name: Name of the expected field containing the token. id_token by default.
        :param early_expiry: Number of seconds before actual expiry at which the token is considered
        expired. 30 seconds by default; set to 0 to deactivate and use the token until actual expiry.
        :param nonce: OpenID nonce (UUID), see http://openid.net/specs/openid-connect-core-1_0.html#IDToken.
        A newly generated UUID by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum milliseconds the success page stays displayed in the browser.
        1 millisecond by default.
        :param failure_display_time: Maximum milliseconds the failure page stays displayed in the browser.
        5 seconds by default.
        :param header_name: Header field used to send the token. Authorization by default.
        :param header_value: Token format; "{token}" must be present. "Bearer {token}" by default.
        :param kwargs: Additional authorization parameters sent as query parameters in the
        authorization URL (e.g. prompt=none to avoid prompting when a session is already opened).
        """
        # ID-token flavor: default both response_type and token field to id_token,
        # and supply a random nonce unless the caller provided one.
        response_type = kwargs.pop("response_type", "id_token")
        token_field_name = kwargs.pop("token_field_name", "id_token")
        nonce = kwargs.pop("nonce", None) or str(uuid.uuid4())
        super().__init__(
            f"https://login.microsoftonline.com/{tenant_id}/oauth2/authorize",
            client_id=client_id,
            response_type=response_type,
            token_field_name=token_field_name,
            nonce=nonce,
            **kwargs,
        )
class OktaImplicit(OAuth2Implicit):
    """
    Describes an Okta (OAuth 2) "Access Token" implicit flow requests authentication.
    https://developer.okta.com/docs/guides/implement-implicit/overview/
    """
    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: Okta instance (like "testserver.okta-emea.com").
        :param client_id: Okta Application Identifier (Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. token by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual expiry at which the token is considered
        expired. 30 seconds by default; set to 0 to deactivate and use the token until actual expiry.
        :param nonce: OpenID nonce (UUID), see http://openid.net/specs/openid-connect-core-1_0.html#IDToken.
        A newly generated UUID by default.
        :param authorization_server: Okta authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes.
        Request ['openid', 'profile', 'email'] by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum milliseconds the success page stays displayed in the browser.
        1 millisecond by default.
        :param failure_display_time: Maximum milliseconds the failure page stays displayed in the browser.
        5 seconds by default.
        :param header_name: Header field used to send the token. Authorization by default.
        :param header_value: Token format; "{token}" must be present. "Bearer {token}" by default.
        :param kwargs: Additional authorization parameters sent as query parameters in the
        authorization URL (e.g. prompt=none to avoid prompting when a session is already opened).
        """
        auth_server = kwargs.pop("authorization_server", None) or "default"
        # Normalize scope: accept a list or a pre-joined space-separated string.
        scope = kwargs.pop("scope", None) or ["openid", "profile", "email"]
        if isinstance(scope, list):
            scope = " ".join(scope)
        kwargs["scope"] = scope
        super().__init__(
            f"https://{instance}/oauth2/{auth_server}/v1/authorize",
            client_id=client_id,
            nonce=kwargs.pop("nonce", None) or str(uuid.uuid4()),
            **kwargs,
        )
class OktaImplicitIdToken(OAuth2Implicit):
    """
    Describes an Okta (OpenID Connect) "ID Token" implicit flow requests authentication.
    """
    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: Okta instance (like "testserver.okta-emea.com").
        :param client_id: Okta Application Identifier (Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. id_token by default.
        :param token_field_name: Name of the expected field containing the token. id_token by default.
        :param early_expiry: Number of seconds before actual expiry at which the token is considered
        expired. 30 seconds by default; set to 0 to deactivate and use the token until actual expiry.
        :param nonce: OpenID nonce (UUID), see http://openid.net/specs/openid-connect-core-1_0.html#IDToken.
        A newly generated UUID by default.
        :param authorization_server: Okta authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes.
        Request ['openid', 'profile', 'email'] by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum milliseconds the success page stays displayed in the browser.
        1 millisecond by default.
        :param failure_display_time: Maximum milliseconds the failure page stays displayed in the browser.
        5 seconds by default.
        :param header_name: Header field used to send the token. Authorization by default.
        :param header_value: Token format; "{token}" must be present. "Bearer {token}" by default.
        :param kwargs: Additional authorization parameters sent as query parameters in the
        authorization URL (e.g. prompt=none to avoid prompting when a session is already opened).
        """
        auth_server = kwargs.pop("authorization_server", None) or "default"
        # Normalize scope: accept a list or a pre-joined space-separated string.
        scope = kwargs.pop("scope", None) or ["openid", "profile", "email"]
        if isinstance(scope, list):
            scope = " ".join(scope)
        kwargs["scope"] = scope
        # ID-token flavor: default both response_type and token field to id_token.
        super().__init__(
            f"https://{instance}/oauth2/{auth_server}/v1/authorize",
            client_id=client_id,
            response_type=kwargs.pop("response_type", "id_token"),
            token_field_name=kwargs.pop("token_field_name", "id_token"),
            nonce=kwargs.pop("nonce", None) or str(uuid.uuid4()),
            **kwargs,
        )
class OktaAuthorizationCode(OAuth2AuthorizationCode):
    """
    Describes an Okta (OAuth 2) "Access Token" authorization code flow requests authentication.
    """
    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: Okta instance (like "testserver.okta-emea.com").
        :param client_id: Okta Application Identifier (Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. token by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual expiry at which the token is considered
        expired. 30 seconds by default; set to 0 to deactivate and use the token until actual expiry.
        :param nonce: OpenID nonce (UUID), see http://openid.net/specs/openid-connect-core-1_0.html#IDToken.
        A newly generated UUID by default.
        :param authorization_server: Okta authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes.
        Request 'openid' by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum milliseconds the success page stays displayed in the browser.
        1 millisecond by default.
        :param failure_display_time: Maximum milliseconds the failure page stays displayed in the browser.
        5 seconds by default.
        :param header_name: Header field used to send the token. Authorization by default.
        :param header_value: Token format; "{token}" must be present. "Bearer {token}" by default.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: Additional authorization parameters sent as query parameters in the
        authorization URL (e.g. prompt=none to avoid prompting when a session is already opened).
        """
        auth_server = kwargs.pop("authorization_server", None) or "default"
        # Normalize scope: accept a list or a pre-joined space-separated string.
        scope = kwargs.pop("scope", "openid")
        if isinstance(scope, list):
            scope = " ".join(scope)
        kwargs["scope"] = scope
        base_url = f"https://{instance}/oauth2/{auth_server}/v1"
        super().__init__(
            f"{base_url}/authorize",
            f"{base_url}/token",
            client_id=client_id,
            **kwargs,
        )
class OktaAuthorizationCodePKCE(OAuth2AuthorizationCodePKCE):
    """
    Describes an Okta (OAuth 2) "Access Token" Proof Key for Code Exchange (PKCE) flow requests authentication.
    """
    def __init__(self, instance: str, client_id: str, **kwargs):
        """
        :param instance: Okta instance (like "testserver.okta-emea.com").
        :param client_id: Okta Application Identifier (Universal Unique Identifier).
        :param response_type: Value of the response_type query parameter. code by default.
        :param token_field_name: Name of the expected field containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual expiry at which the token is considered
        expired. 30 seconds by default; set to 0 to deactivate and use the token until actual expiry.
        :param code_field_name: Field name containing the code. code by default.
        :param nonce: OpenID nonce (UUID), see http://openid.net/specs/openid-connect-core-1_0.html#IDToken.
        A newly generated UUID by default.
        :param authorization_server: Okta authorization server. default by default.
        :param scope: Scope parameter sent in query. Can also be a list of scopes.
        Request 'openid' by default.
        :param redirect_uri_endpoint: Custom endpoint used as redirect_uri the following way:
        http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Redirect on / (root) by default.
        :param redirect_uri_port: Port of the local server awaiting the OAuth 2 token. 5000 by default.
        :param timeout: Maximum amount of seconds to wait for a token once requested. 1 minute by default.
        :param success_display_time: Maximum milliseconds the success page stays displayed in the browser.
        1 millisecond by default.
        :param failure_display_time: Maximum milliseconds the failure page stays displayed in the browser.
        5 seconds by default.
        :param header_name: Header field used to send the token. Authorization by default.
        :param header_value: Token format; "{token}" must be present. "Bearer {token}" by default.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: Additional authorization parameters sent as query parameters in the
        authorization URL and as body parameters in the token URL. Usual parameters are:
        * client_secret: If client is not authenticated with the authorization server
        * nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details
        """
        auth_server = kwargs.pop("authorization_server", None) or "default"
        # Normalize scope: accept a list or a pre-joined space-separated string.
        scope = kwargs.pop("scope", "openid")
        if isinstance(scope, list):
            scope = " ".join(scope)
        kwargs["scope"] = scope
        base_url = f"https://{instance}/oauth2/{auth_server}/v1"
        super().__init__(
            f"{base_url}/authorize",
            f"{base_url}/token",
            client_id=client_id,
            **kwargs,
        )
class OktaClientCredentials(OAuth2ClientCredentials):
    """
    Describes an Okta (OAuth 2) client credentials (also called application) flow requests authentication.
    """

    def __init__(self, instance: str, client_id: str, client_secret: str, **kwargs):
        """
        :param instance: Okta instance (like "testserver.okta-emea.com")
        :param client_id: Okta Application Identifier (formatted as an Universal Unique Identifier)
        :param client_secret: Okta application client secret.
        :param authorization_server: Okta authorization server
        default by default.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes.
        Request 'openid' by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: all additional authorization parameters that should be put as query parameter in the token URL.
        :raises Exception: if instance, client_id or client_secret is empty.
        """
        # Fail fast on missing mandatory parameters (consistent with
        # OktaResourceOwnerPasswordCredentials) instead of silently building
        # a malformed token URL such as "https:///oauth2/default/v1/token".
        if not instance:
            raise Exception("Instance is mandatory.")
        if not client_id:
            raise Exception("Client ID is mandatory.")
        if not client_secret:
            raise Exception("Client secret is mandatory.")
        authorization_server = kwargs.pop("authorization_server", None) or "default"
        # Accept either a single scope string or a list of scopes.
        scopes = kwargs.pop("scope", "openid")
        kwargs["scope"] = " ".join(scopes) if isinstance(scopes, list) else scopes
        OAuth2ClientCredentials.__init__(
            self,
            f"https://{instance}/oauth2/{authorization_server}/v1/token",
            client_id=client_id,
            client_secret=client_secret,
            **kwargs,
        )
class OktaResourceOwnerPasswordCredentials(OAuth2ResourceOwnerPasswordCredentials):
    """
    Describes an Okta (OAuth 2) resource owner password credentials (also called password) flow requests authentication.
    """

    def __init__(
        self,
        instance: str,
        username: str,
        password: str,
        client_id: str,
        client_secret: str,
        **kwargs,
    ):
        """
        :param instance: Okta instance (like "testserver.okta-emea.com")
        :param username: Resource owner user name.
        :param password: Resource owner password.
        :param client_id: Okta Application Identifier (formatted as an Universal Unique Identifier)
        :param client_secret: Okta application client secret.
        :param authorization_server: Okta authorization server
        default by default.
        :param timeout: Maximum amount of seconds to wait for a token to be received once requested.
        Wait for 1 minute by default.
        :param header_name: Name of the header field used to send token.
        Token will be sent in Authorization header field by default.
        :param header_value: Format used to send the token value.
        "{token}" must be present as it will be replaced by the actual token.
        Token will be sent as "Bearer {token}" by default.
        :param scope: Scope parameter sent to token URL as body. Can also be a list of scopes.
        Request 'openid' by default.
        :param token_field_name: Field name containing the token. access_token by default.
        :param early_expiry: Number of seconds before actual token expiry where token will be considered as expired.
        Default to 30 seconds to ensure token will not expire between the time of retrieval and the time the request
        reaches the actual server. Set it to 0 to deactivate this feature and use the same token until actual expiry.
        :param session: requests.Session instance that will be used to request the token.
        Use it to provide a custom proxying rule for instance.
        :param kwargs: all additional authorization parameters that should be put as body parameters in the token URL.
        """
        # Fail fast on missing mandatory parameters, checked in the same order.
        for value, error_message in (
            (instance, "Instance is mandatory."),
            (client_id, "Client ID is mandatory."),
            (client_secret, "Client secret is mandatory."),
        ):
            if not value:
                raise Exception(error_message)
        authorization_server = kwargs.pop("authorization_server", None) or "default"
        # Accept either a single scope string or a list of scopes.
        requested_scope = kwargs.pop("scope", "openid")
        if isinstance(requested_scope, list):
            requested_scope = " ".join(requested_scope)
        kwargs["scope"] = requested_scope
        OAuth2ResourceOwnerPasswordCredentials.__init__(
            self,
            f"https://{instance}/oauth2/{authorization_server}/v1/token",
            username=username,
            password=password,
            session_auth=(client_id, client_secret),
            **kwargs,
        )
class HeaderApiKey(requests.auth.AuthBase, SupportMultiAuth):
    """Describes an API Key requests authentication, sent as an HTTP header."""

    def __init__(self, api_key: str, header_name: str = None):
        """
        :param api_key: The API key that will be sent.
        :param header_name: Name of the header field. "X-API-Key" by default.
        """
        self.api_key = api_key
        if not api_key:
            raise Exception("API Key is mandatory.")
        self.header_name = "X-API-Key" if not header_name else header_name

    def __call__(self, r):
        # Attach the API key to the outgoing request headers
        r.headers.update({self.header_name: self.api_key})
        return r
class QueryApiKey(requests.auth.AuthBase, SupportMultiAuth):
    """Describes an API Key requests authentication, sent as a URL query parameter."""

    def __init__(self, api_key: str, query_parameter_name: str = None):
        """
        :param api_key: The API key that will be sent.
        :param query_parameter_name: Name of the query parameter. "api_key" by default.
        """
        self.api_key = api_key
        if not api_key:
            raise Exception("API Key is mandatory.")
        self.query_parameter_name = "api_key" if not query_parameter_name else query_parameter_name

    def __call__(self, r):
        # Append the key as an extra query parameter on the outgoing URL
        r.url = _add_parameters(r.url, {self.query_parameter_name: self.api_key})
        return r
class Basic(requests.auth.HTTPBasicAuth, SupportMultiAuth):
    """Describes a basic requests authentication."""

    def __init__(self, username: str, password: str):
        # Delegates to HTTPBasicAuth (first in the MRO), which stores the credentials.
        super().__init__(username, password)
class NTLM(requests.auth.AuthBase, SupportMultiAuth):
    """Describes a NTLM requests authentication."""

    def __init__(self, username: str = None, password: str = None):
        """
        :param username: Mandatory if requests_negotiate_sspi module is not installed.
        :param password: Mandatory if requests_negotiate_sspi module is not installed.
        """
        self.username = username
        self.password = password
        if username or password:
            # Explicit credentials: both fields are required by requests_ntlm.
            if not username:
                raise Exception(
                    'NTLM authentication requires "username" to be provided in security_details.'
                )
            if not password:
                raise Exception(
                    'NTLM authentication requires "password" to be provided in security_details.'
                )
            try:
                import requests_ntlm

                self.auth = requests_ntlm.HttpNtlmAuth(username, password)
            except ImportError:
                raise Exception("NTLM authentication requires requests_ntlm module.")
        else:
            # No credentials at all: fall back to single sign-on via SSPI.
            try:
                import requests_negotiate_sspi

                self.auth = requests_negotiate_sspi.HttpNegotiateAuth()
            except ImportError:
                raise Exception(
                    "NTLM authentication requires requests_negotiate_sspi module."
                )

    def __call__(self, r):
        # Delegate request signing to the underlying NTLM/SSPI auth object.
        self.auth(r)
        return r
class _MultiAuth(requests.auth.AuthBase):
    """Authentication using multiple authentication methods."""

    def __init__(self, *authentication_modes):
        self.authentication_modes = authentication_modes

    def __call__(self, r):
        # Apply every authentication mode to the request, in registration order.
        for mode in self.authentication_modes:
            mode(r)
        return r

    def _combine(self, other):
        # Flatten nested _MultiAuth instances so the result always holds a
        # single flat tuple of authentication modes.
        if isinstance(other, _MultiAuth):
            return _MultiAuth(*self.authentication_modes, *other.authentication_modes)
        return _MultiAuth(*self.authentication_modes, other)

    # Both "+" and "&" combine authentication methods identically.
    __add__ = _combine
    __and__ = _combine
class Auths(_MultiAuth):
    """Deprecated alias for combining authentication methods; use ``+`` instead."""

    def __init__(self, *authentication_modes):
        warnings.warn("Auths class will be removed in the future. Use + instead.", DeprecationWarning)
        super().__init__(*authentication_modes)
[](https://pypi.python.org/pypi/requests-aws4auth)
[](https://pypi.python.org/pypi/requests-aws4auth)
Amazon Web Services version 4 authentication for the Python [Requests](https://github.com/kennethreitz/requests) library.
Features
========
- Requests authentication for all AWS services that support AWS auth v4
- Independent signing key objects
- Automatic regeneration of keys when scope date boundary is passed
- Support for STS temporary credentials
Implements header-based authentication, GET URL parameter and POST
parameter authentication are not supported.
Supported Services
==================
This package has been tested as working against:
AppStream, AppSync, Auto-Scaling, CloudFormation, CloudFront, CloudHSM,
CloudSearch, CloudTrail, CloudWatch Monitoring, CloudWatch Logs,
CodeDeploy, Cognito Identity, Cognito Sync, Config, DataPipeline, Direct
Connect, DynamoDB, Elastic Beanstalk, ElastiCache, EC2, EC2 Container
Service, Elastic Load Balancing, Elastic MapReduce, ElasticSearch,
Elastic Transcoder, Glacier, Identity and Access Management (IAM), Key
Management Service (KMS), Kinesis, Lambda, Opsworks, Redshift,
Relational Database Service (RDS), Route 53, Simple Storage Service
(S3), Simple Notification Service (SNS), Simple Queue Service (SQS),
Storage Gateway, Security Token Service (STS)
The following services do not support AWS auth version 4 and are not
usable with this package:
Simple Email Service (SES), Simple Workflow Service (SWF),
Import/Export, SimpleDB, DevPay, Mechanical Turk
The AWS Support API has not been tested as it requires a premium
subscription.
Python versions
========
In the 1.x semantic versions, the minimum python support will be gradually raised:
* 1.0.x: Support python2.7 and python3.3+.
* 1.1.x: python2.7 is no longer officially supported and works on a best-effort basis only. Supports python3.3+.
* 1.2.x: [Requires-Python](https://packaging.python.org/guides/dropping-older-python-versions/#specify-the-version-ranges-for-supported-python-distributions) will be set to python3.3+, explicitly removing earlier versions. python<3.7 is supported on a best-effort basis only.
* 1.3.x: [Requires-Python](https://packaging.python.org/guides/dropping-older-python-versions/#specify-the-version-ranges-for-supported-python-distributions) will be set to python3.7+, explicitly removing earlier versions. (best-effort is TBD)
Installation
============
Install via pip:
``` {.sourceCode .bash}
$ pip install requests-aws4auth
```
requests-aws4auth requires the
[Requests](https://github.com/kennethreitz/requests) library by Kenneth
Reitz.
requests-aws4auth is tested on Python 2.7 and 3.5 and up.
Behaviour changes in 0.8
========================
Version 0.8 introduces request date checking and automatic key
regeneration behaviour as default. This has implications for sharing
authentication objects between threads, and for storage of secret keys.
See the relevant sections below for details. See also the discussion in
[GitHub issue
\#10](https://github.com/sam-washington/requests-aws4auth/issues/10).
Basic usage
===========
``` {.sourceCode .python}
>>> import requests
>>> from requests_aws4auth import AWS4Auth
>>> endpoint = 'http://s3-eu-west-1.amazonaws.com'
>>> auth = AWS4Auth('<ACCESS ID>', '<ACCESS KEY>', 'eu-west-1', 's3')
>>> response = requests.get(endpoint, auth=auth)
>>> response.text
<?xml version="1.0" encoding="UTF-8"?>
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f461ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
...
```
This example would list your buckets in the `eu-west-1` region of the
Amazon S3 service.
STS Temporary Credentials
=========================
``` {.sourceCode .python}
>>> from requests_aws4auth import AWS4Auth
>>> auth = AWS4Auth('<ACCESS ID>', '<ACCESS KEY>', 'eu-west-1', 's3',
session_token='<SESSION TOKEN>')
...
```
This example shows how to construct an AWS4Auth object for use with STS
temporary credentials. The `x-amz-security-token` header is added with
the session token. Temporary credential timeouts are not managed -- in
case the temporary credentials expire, they need to be re-generated and
the AWS4Auth object re-constructed with the new credentials.
Dynamic STS Credentials using botocore RefreshableCredentials
=============================================================
``` {.sourceCode .python}
>>> from requests_aws4auth import AWS4Auth
>>> from botocore.session import Session
>>> credentials = Session().get_credentials()
>>> auth = AWS4Auth(region='eu-west-1', service='es',
refreshable_credentials=credentials)
...
```
This example shows how to construct an AWS4Auth instance with
automatically refreshing credentials, suitable for long-running
applications using AWS IAM assume-role.
The RefreshableCredentials instance is used to generate valid static
credentials per-request, eliminating the need to recreate the AWS4Auth
instance when temporary credentials expire.
Date handling
=============
If an HTTP request to be authenticated contains a `Date` or `X-Amz-Date`
header, AWS will only accept the authorised request if the date in the
header matches the scope date of the signing key (see the [AWS REST API date
docs](http://docs.aws.amazon.com/general/latest/gr/sigv4-date-handling.html)).
From version 0.8 of requests-aws4auth, if the header date does not match
the scope date, an `AWS4Auth` instance will automatically regenerate its
signing key, using the same scope parameters as the previous key except
for the date, which will be changed to match the request date. If a
request does not include a date, the current date is added to the
request in an `X-Amz-Date` header, and the signing key is regenerated if
this differs from the scope date.
This means that `AWS4Auth` now extracts and parses dates from the values
of `X-Amz-Date` and `Date` headers. Supported date formats are:
- RFC 7231 (e.g. Mon, 09 Sep 2011 23:36:00 GMT)
- RFC 850 (e.g. Sunday, 06-Nov-94 08:49:37 GMT)
- C time (e.g. Wed Dec 4 00:00:00 2002)
- Amz-Date format (e.g. 20090325T010101Z)
- ISO 8601 / RFC 3339 (e.g. 2009-03-25T10:11:12.13-01:00)
If either header is present but `AWS4Auth` cannot extract a date because
all present date headers are in an unrecognisable format, `AWS4Auth`
will delete any `X-Amz-Date` and `Date` headers present and replace with
a single `X-Amz-Date` header containing the current date. This behaviour
can be modified using the `raise_invalid_date` keyword argument of the
`AWS4Auth` constructor.
Automatic key regeneration
==========================
If you do not want the signing key to be automatically regenerated when
a mismatch between the request date and the scope date is encountered,
use the alternative `StrictAWS4Auth` class, which is identical to
`AWS4Auth` except that upon encountering a date mismatch it just raises
a `DateMismatchError`. You can also use the `PassiveAWS4Auth` class,
which mimics the `AWS4Auth` behaviour prior to version 0.8 and just
signs and sends the request, whether the date matches or not. In this
case it is up to the calling code to handle an authentication failure
response from AWS caused by the date mismatch.
Secret key storage
==================
To allow automatic key regeneration, the secret key is stored in the
`AWS4Auth` instance, in the signing key object. If you do not want this
to occur, instantiate the instance using an `AWS4SigningKey` which was
created with the `store_secret_key` parameter set to False:
``` {.sourceCode .python}
>>> sig_key = AWS4SigningKey(secret_key, region, service, date, False)
>>> auth = StrictAWS4Auth(access_id, sig_key)
```
The `AWS4Auth` class will then raise a `NoSecretKeyError` when it
attempts to regenerate its key. A slightly more conceptually elegant way
to handle this is to use the alternative `StrictAWS4Auth` class, again
instantiating it with an `AWS4SigningKey` instance created with
`store_secret_key = False`.
Multithreading
==============
If you share `AWS4Auth` (or even `StrictAWS4Auth`) instances between
threads you are likely to encounter problems. Because `AWS4Auth`
instances may unpredictably regenerate their signing key as part of
signing a request, threads using the same instance may find the key
changed by another thread halfway through the signing process, which may
result in undefined behaviour.
It may be possible to rig up a workable instance sharing mechanism using
locking primitives and the `StrictAWS4Auth` class, however this poor
author can't think of a scenario which works safely yet doesn't suffer
from at some point blocking all threads for at least the duration of an
HTTP request, which could be several seconds. If several requests come
in in close succession which all require key regenerations then the
system could be forced into serial operation for quite a length of time.
In short, it's probably best to create a thread-local instance of
`AWS4Auth` for each thread that needs to do authentication.
API reference
=============
See the docstrings in `aws4auth.py` and `aws4signingkey.py`.
Testing
=======
A test suite is included in the test folder.
The package passes all tests in the AWS auth v4
[test_suite](http://docs.aws.amazon.com/general/latest/gr/signature-v4-test-suite.html),
and contains tests against the supported live services. See docstrings
in `test/requests_aws4auth_test.py` for details about running the tests.
Connection parameters are included in the tests for the AWS Support API,
should you have access and want to try it. The documentation says it
supports auth v4 so it should work if you have a subscription. Do pass
on your results!
Unsupported AWS features / todo
===============================
- Currently does not support Amazon S3 chunked uploads
- Tests for new AWS services
- Requires Requests library to be present even if only using
AWS4SigningKey
- Coherent documentation
Version release notes
=====================
- update `HISTORY.md`
- update `requests_aws4auth/__init__.py`
- create a [release](https://github.com/tedder/requests-aws4auth/releases) on github
prep:
```
python3 -m pip install --user --upgrade setuptools wheel testresources twine
```
build and release, creds in `~/.pypirc`:
```
rm -f dist/*; \
python3 setup.py sdist bdist_wheel && \
python3 -m twine upload --repository testpypi_requests_aws4auth dist/* && \
python3 -m twine upload --repository pypi dist/*
```
| /requests-aws4auth-1.2.3.tar.gz/requests-aws4auth-1.2.3/README.md | 0.787114 | 0.812459 | README.md | pypi |
from __future__ import unicode_literals
import hmac
import hashlib
from warnings import warn
from datetime import datetime
from six import text_type
class AWS4SigningKey:
    """
    AWS signing key. Used to sign AWS authentication strings.

    The secret key is stored in the instance after instantiation; this can be
    changed via the store_secret_key argument, see __init__ for details.

    Methods:
    generate_key() -- Generate AWS4 Signing Key string
    sign_sha256()  -- Generate SHA256 HMAC signature, encoding message to bytes
                      first if required

    Attributes:
    region   -- AWS region the key is scoped for
    service  -- AWS service the key is scoped for
    date     -- Date the key is scoped for
    scope    -- The AWS scope string for this key, calculated from the above
                attributes
    key      -- The signing key string itself
    amz_date -- Deprecated name for 'date'. Use the 'date' attribute instead.
                amz_date will be removed in a future version.
    """

    def __init__(self, secret_key, region, service, date=None,
                 store_secret_key=True):
        """
        >>> AWS4SigningKey(secret_key, region, service[, date]
        ...                [, store_secret_key])

        secret_key -- Your AWS secret access key.
        region     -- The region you're connecting to, as per the list at
                      http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
                      e.g. us-east-1. For services which don't require a region
                      (e.g. IAM), use us-east-1.
        service    -- The name of the service you're connecting to, as per the
                      endpoints at:
                      http://docs.aws.amazon.com/general/latest/gr/rande.html
                      e.g. elasticbeanstalk
        date       -- 8-digit date of the form YYYYMMDD. The key is only valid
                      for requests with a Date or X-Amz-Date header matching
                      this date. Defaults to the current UTC date.
        store_secret_key
                   -- When True (the default) the secret key is retained on the
                      instance in the secret_key attribute and is available to
                      any code the instance is passed to. Retaining it makes it
                      easy to regenerate the key when a scope parameter
                      (usually the date) changes; the AWS4Auth class relies on
                      this for its automatic key updates on a request
                      date/scope date mismatch.
                      Set this to False when passing instances to untrusted
                      code: the secret key is then discarded as soon as the
                      signing key has been generated. Note that you will then
                      need to regenerate keys manually (or pass the secret key
                      to regenerate_key() on an AWS4Auth instance).

        All arguments should be supplied as strings.
        """
        self.region = region
        self.service = service
        self.date = date if date else datetime.utcnow().strftime('%Y%m%d')
        self.scope = '{}/{}/{}/aws4_request'.format(self.date, region, service)
        self.store_secret_key = store_secret_key
        # Drop the secret immediately unless the caller asked us to retain it.
        self.secret_key = secret_key if store_secret_key else None
        self.key = self.generate_key(secret_key, region, service, self.date)

    @classmethod
    def generate_key(cls, secret_key, region, service, date,
                     intermediates=False):
        """
        Generate the signing key string as bytes.

        If intermediates is set to True, returns a 4-tuple containing the key
        and the intermediate keys:

            ( signing_key, date_key, region_key, service_key )

        The intermediate keys can be used for testing against examples from
        Amazon.
        """
        # AWS4 key derivation: HMAC-chain each scope component onto the
        # previous key, starting from "AWS4" + secret.
        signing_key = ('AWS4' + secret_key).encode('utf-8')
        stages = []
        for scope_item in (date, region, service, 'aws4_request'):
            signing_key = cls.sign_sha256(signing_key, scope_item)
            stages.append(signing_key)
        if intermediates:
            date_key, region_key, service_key, key = stages
            return (key, date_key, region_key, service_key)
        return stages[-1]

    @staticmethod
    def sign_sha256(key, msg):
        """
        Generate an SHA256 HMAC, encoding msg to UTF-8 if not
        already encoded.

        key -- signing key. bytes.
        msg -- message to sign. unicode or bytes.
        """
        payload = msg.encode('utf-8') if isinstance(msg, text_type) else msg
        return hmac.new(key, payload, hashlib.sha256).digest()

    @property
    def amz_date(self):
        # Backwards-compatible alias for the 'date' attribute.
        warn("This attribute has been renamed to 'date'. 'amz_date' is "
             "deprecated and will be removed in a future version.",
             DeprecationWarning)
        return self.date
import requests
__all__ = ["URL"]
if requests.compat.is_py2:
from urlparse import parse_qs
elif requests.compat.is_py3:
from urllib.parse import parse_qs
from . import utils
class URL(object):
    """Mutable representation of a URL split into scheme, credentials, host,
    port, path, ordered query parameters and fragment.

    The parsed parts live in the ``_info`` dict; the string form is rebuilt
    on demand by :meth:`forge` and cached in ``_url``.
    """

    def __init__(self, url_str):
        """Parse ``url_str`` into its component parts.

        :param url_str: URL to parse; converted to str via ``utils.to_str``.

        Usage::

            >>> url = URL("http://www.example.com/?foo=bar&baz")
            >>> url.params
            {'foo': 'bar', 'baz': None}
        """
        parsed_url = requests.compat.urlparse(utils.to_str(url_str))
        # netloc may carry credentials: "user:pass@host[:port]"
        netloc_parts = parsed_url.netloc.split("@")
        if len(netloc_parts) == 1:
            username = password = None
            host_str = netloc_parts[0]
        else:
            # NOTE(review): assumes the credential part is exactly "user:pass";
            # a password containing ':' or a user without a password would
            # raise ValueError here — confirm inputs are constrained upstream.
            username, password = netloc_parts[0].split(":")
            host_str = netloc_parts[1]
        host_parts = host_str.split(":")
        host = host_parts[0]
        if len(host_parts) == 1:
            # NOTE(review): the port defaults to 80 even for https URLs;
            # netloc below only omits the port when it equals 80 — confirm
            # this is the intended behaviour for non-http schemes.
            port = 80
        else:
            port = int(host_parts[1])
        # Query parameters are kept as an ordered list of (key, value) pairs;
        # a parameter without a value (e.g. "?baz") is stored as (key, None).
        params = [
            (key, val[0] if val[0] else None)
            for key, val in parse_qs(parsed_url.query, True).items()
        ]
        self._info = dict(
            scheme=parsed_url.scheme or "http",
            username=username,
            password=password,
            host=host,
            port=port,
            path=parsed_url.path or "/",
            params=params,
            fragment=parsed_url.fragment
        )
        # Cached string form; rebuilt lazily by forge().
        self._url = None

    def forge(self, **kwargs):
        """Rebuild, cache and return the URL string.

        Any keyword arguments (e.g. ``key=``, ``reverse=``) are forwarded to
        ``list.sort`` to sort the query parameters in place before assembly.
        """
        if kwargs:
            self._info["params"].sort(**kwargs)
        parts = [
            "{0}://".format(self.scheme),
            self.netloc,
            self.uri
        ]
        self._url = utils.to_str("".join(parts))
        return self._url

    def __str__(self):
        # Forge lazily on first use; note the cache is NOT invalidated by
        # later mutations — call forge() explicitly after changing parts.
        if self._url is None:
            self.forge()
        return self._url

    @property
    def scheme(self):
        return self._info["scheme"]

    @property
    def netloc(self):
        """Network location string: ``[user:pass@]host[:port]``.

        Credentials are included only when both username and password are
        set; the port is omitted when it equals 80.
        """
        if self.username is None or self.password is None:
            netloc = self.host
        else:
            netloc = "{0}:{1}@{2}".format(
                self.username,
                self.password,
                self.host
            )
        if self.port != 80:
            netloc = "{0}:{1}".format(netloc, self.port)
        return netloc

    @property
    def username(self):
        return self._info["username"]

    @property
    def password(self):
        return self._info["password"]

    @property
    def host(self):
        return self._info["host"]

    @property
    def port(self):
        return self._info["port"]

    @property
    def path(self):
        return self._info["path"]

    @path.setter
    def path(self, new_val):
        # NOTE: the return value of a property setter is ignored by Python.
        self._info["path"] = new_val
        return new_val

    @property
    def params(self):
        """Query parameters as a plain dict.

        Ordering and duplicate keys are lost in this view; the authoritative
        ordered list lives in ``_info["params"]``.
        """
        return dict(self._info["params"])

    @params.setter
    def params(self, new_params):
        # Accept either a mapping or an iterable of (key, value) pairs.
        if isinstance(new_params, dict):
            self._info["params"] = list(new_params.items())
        else:
            self._info["params"] = list(new_params)
        return new_params

    @property
    def query(self):
        """Encoded query string built from the ordered parameter list."""
        query = []
        for key, val in self._info["params"]:
            if val is None:
                # Value-less parameter: emit just the percent-quoted key.
                query.append(requests.compat.quote(utils.to_bytes(key)))
            else:
                param = {utils.to_bytes(key): utils.to_bytes(val)}
                query.append(requests.compat.urlencode(param))
        return "&".join(query)

    @property
    def uri(self):
        """Path plus optional ``?query`` and ``#fragment`` parts."""
        query = self.query
        fragment = self.fragment
        return "".join([
            self.path,
            "?{0}".format(query) if query else "",
            "#{0}".format(fragment) if fragment else ""
        ])

    def append_params(self, new_params):
        """Merge ``new_params`` into the existing parameters (new values win)
        and return the resulting parameter dict."""
        params = dict(self.params, **dict(new_params))
        self.params = params
        return self.params

    @property
    def fragment(self):
        return self._info["fragment"]
try:
    # Python 3.3+: the ABCs live in collections.abc; the old alias in
    # `collections` was removed entirely in Python 3.10.
    from collections.abc import MutableMapping
except ImportError:  # Python 2 fallback
    from collections import MutableMapping
import sqlite3 as sqlite
from contextlib import contextmanager
try:
    import threading
except ImportError:
    import dummy_threading as threading
try:
    import cPickle as pickle
except ImportError:
    import pickle
from requests_cache.compat import bytes
class DbDict(MutableMapping):
    """DbDict - a dictionary-like object for saving large datasets to a `sqlite` database.

    It's possible to create multiple DbDict instances, which will be stored as
    separate tables in one database::

        d1 = DbDict('test', 'table1')
        d2 = DbDict('test', 'table2')
        d3 = DbDict('test', 'table3')

    All data will be stored in the ``test`` database in the corresponding
    tables: ``table1``, ``table2`` and ``table3``.
    """

    def __init__(self, filename, table_name='data', fast_save=False, **options):
        """
        :param filename: filename for database (without extension)
        :param table_name: table name
        :param fast_save: If it's True, then sqlite will be configured with
                          `"PRAGMA synchronous = 0;" <http://www.sqlite.org/pragma.html#pragma_synchronous>`_
                          to speedup cache saving, but be careful, it's dangerous.
                          Tests showed that insertion order of records can be wrong with this option.
        """
        self.filename = filename
        self.table_name = table_name
        self.fast_save = fast_save

        #: Transactions can be commited if this property is set to `True`
        self.can_commit = True

        self._bulk_commit = False
        self._pending_connection = None
        # Guards access to the shared pending connection during bulk commits.
        self._lock = threading.RLock()
        with self.connection() as con:
            con.execute("create table if not exists `%s` (key PRIMARY KEY, value)" % self.table_name)

    @contextmanager
    def connection(self, commit_on_success=False):
        """Yield a sqlite connection.

        During a bulk commit a single shared connection is reused (and kept
        open); otherwise a fresh connection is opened and closed per call.

        :param commit_on_success: commit after the block finishes without
                                  raising, provided :attr:`can_commit` is True
        """
        with self._lock:
            if self._bulk_commit:
                if self._pending_connection is None:
                    self._pending_connection = sqlite.connect(self.filename)
                con = self._pending_connection
            else:
                con = sqlite.connect(self.filename)
            try:
                if self.fast_save:
                    con.execute("PRAGMA synchronous = 0;")
                yield con
                if commit_on_success and self.can_commit:
                    con.commit()
            finally:
                # The pending (bulk) connection stays open until bulk_commit()
                # finishes; per-call connections are always closed.
                if not self._bulk_commit:
                    con.close()

    def commit(self, force=False):
        """
        Commits pending transaction if :attr:`can_commit` or `force` is `True`

        :param force: force commit, ignore :attr:`can_commit`
        """
        if force or self.can_commit:
            if self._pending_connection is not None:
                self._pending_connection.commit()

    @contextmanager
    def bulk_commit(self):
        """
        Context manager used to speedup insertion of big number of records
        ::

            >>> d1 = DbDict('test')
            >>> with d1.bulk_commit():
            ...     for i in range(1000):
            ...         d1[i] = i * 2
        """
        self._bulk_commit = True
        self.can_commit = False
        try:
            yield
            self.commit(True)
        finally:
            self._bulk_commit = False
            self.can_commit = True
            # Fix: the connection is only created on first write; guard
            # against AttributeError when the block performed no writes.
            if self._pending_connection is not None:
                self._pending_connection.close()
                self._pending_connection = None

    def __getitem__(self, key):
        with self.connection() as con:
            row = con.execute("select value from `%s` where key=?" %
                              self.table_name, (key,)).fetchone()
            if not row:
                # Include the missing key in the error for easier debugging.
                raise KeyError(key)
            return row[0]

    def __setitem__(self, key, item):
        with self.connection(True) as con:
            con.execute("insert or replace into `%s` (key,value) values (?,?)" %
                        self.table_name, (key, item))

    def __delitem__(self, key):
        with self.connection(True) as con:
            cur = con.execute("delete from `%s` where key=?" %
                              self.table_name, (key,))
            if not cur.rowcount:
                raise KeyError(key)

    def __iter__(self):
        with self.connection() as con:
            for row in con.execute("select key from `%s`" %
                                   self.table_name):
                yield row[0]

    def __len__(self):
        with self.connection() as con:
            return con.execute("select count(key) from `%s`" %
                               self.table_name).fetchone()[0]

    def clear(self):
        # Dropping and recreating the table is faster than deleting row by row.
        with self.connection(True) as con:
            con.execute("drop table `%s`" % self.table_name)
            con.execute("create table `%s` (key PRIMARY KEY, value)" %
                        self.table_name)

    def __str__(self):
        return str(dict(self.items()))
class DbPickleDict(DbDict):
    """Same as :class:`DbDict`, but pickles values before saving."""

    def __setitem__(self, key, item):
        # Serialize the value and wrap it as a sqlite BLOB before storing.
        serialized = sqlite.Binary(pickle.dumps(item))
        super(DbPickleDict, self).__setitem__(key, serialized)

    def __getitem__(self, key):
        raw = super(DbPickleDict, self).__getitem__(key)
        return pickle.loads(bytes(raw))
from __future__ import annotations
import json
from contextlib import nullcontext
from hashlib import blake2b
from logging import getLogger
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
Union,
)
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from requests import Request, Session
from requests.models import CaseInsensitiveDict
from url_normalize import url_normalize
from ._utils import decode, encode, patch_form_boundary
__all__ = [
'create_key',
'normalize_body',
'normalize_headers',
'normalize_request',
'normalize_params',
'normalize_url',
]
if TYPE_CHECKING:
from .models import AnyPreparedRequest, AnyRequest, CachedResponse
# Maximum JSON request body size that will be filtered and normalized
MAX_NORM_BODY_SIZE = 10 * 1024 * 1024
KVList = List[Tuple[str, str]]
ParamList = Optional[Iterable[str]]
RequestContent = Union[Mapping, str, bytes]
logger = getLogger(__name__)
def create_key(
    request: AnyRequest,
    ignored_parameters: ParamList = None,
    match_headers: Union[ParamList, bool] = False,
    serializer: Any = None,
    **request_kwargs,
) -> str:
    """Create a normalized cache key based on a request object

    Args:
        request: Request object to generate a cache key from
        ignored_parameters: Request paramters, headers, and/or JSON body params to exclude
        match_headers: Match only the specified headers, or ``True`` to match all headers
        request_kwargs: Additional keyword arguments for :py:func:`~requests.request`
    """
    # Normalize the request first so equivalent requests hash identically
    norm_request = normalize_request(request, ignored_parameters)

    # Fold every relevant piece of request info into a short blake2b digest
    key = blake2b(digest_size=8)
    for part in (
        norm_request.method or '',
        norm_request.url,
        norm_request.body or '',
        request_kwargs.get('verify', True),
        *get_matched_headers(norm_request.headers, match_headers),
        str(serializer),
    ):
        key.update(encode(part))
    return key.hexdigest()
def get_matched_headers(
    headers: CaseInsensitiveDict, match_headers: Union[ParamList, bool]
) -> List[str]:
    """Get only the headers we should match against as a list of ``k=v`` strings, given an optional
    include list.
    """
    if not match_headers:
        return []
    # True means "match on every header present"
    candidates = headers if match_headers is True else match_headers
    matched = []
    for name in sorted(candidates, key=str.lower):
        if name in headers:
            matched.append(f'{name.lower()}={headers[name]}')
    return matched
def normalize_request(
    request: AnyRequest, ignored_parameters: ParamList = None
) -> AnyPreparedRequest:
    """Normalize and remove ignored parameters from request URL, body, and headers.

    This is used for both:

    * Increasing cache hits by generating more precise cache keys
    * Redacting potentially sensitive info from cached requests

    Args:
        request: Request object to normalize
        ignored_parameters: Request parameters, headers, and/or JSON body params to exclude
    """
    if isinstance(request, Request):
        # An unprepared multipart POST gets a pinned form boundary so its body hashes consistently
        ctx = patch_form_boundary() if request.files else nullcontext()
        with ctx:
            norm_request: AnyPreparedRequest = Session().prepare_request(request)
    else:
        norm_request = request.copy()

    # Normalize each component of the prepared request
    norm_request.method = (norm_request.method or '').upper()
    norm_request.url = normalize_url(norm_request.url or '', ignored_parameters)
    norm_request.headers = normalize_headers(norm_request.headers, ignored_parameters)
    norm_request.body = normalize_body(norm_request, ignored_parameters)
    return norm_request
def normalize_headers(
    headers: MutableMapping[str, str], ignored_parameters: ParamList = None
) -> CaseInsensitiveDict:
    """Sort and filter request headers, and normalize minor variations in multi-value headers.

    The input mapping is not modified; a new :py:class:`.CaseInsensitiveDict` is returned.
    (Previously, multi-value headers were normalized in place, which could lowercase values on a
    live request's headers when called from logging helpers.)

    Args:
        headers: Request headers to normalize
        ignored_parameters: Header names to redact
    """
    if ignored_parameters:
        headers = filter_sort_dict(headers, ignored_parameters)
    normalized = CaseInsensitiveDict()
    for k, v in headers.items():
        if ',' in v:
            # Normalize multi-value headers: lowercase, strip whitespace, and sort values so
            # e.g. 'gzip, deflate' and 'deflate,gzip' produce the same cache key
            values = [part.strip() for part in v.lower().split(',') if part.strip()]
            v = ', '.join(sorted(values))
        normalized[k] = v
    return normalized
def normalize_url(url: str, ignored_parameters: ParamList) -> str:
    """Normalize and filter a URL. This includes request parameters, IDN domains, scheme, host,
    port, etc.
    """
    # Filter/sort query params first, then apply general URL normalization
    filtered = filter_url(url, ignored_parameters)
    return url_normalize(filtered)
def normalize_body(request: AnyPreparedRequest, ignored_parameters: ParamList) -> bytes:
    """Normalize and filter a request body if possible, depending on Content-Type"""
    body = request.body
    if not body:
        return b''

    # Filter and sort params if the content type is one we know how to parse
    content_type = request.headers.get('Content-Type')
    if content_type == 'application/json':
        body = normalize_json_body(body, ignored_parameters)
    elif content_type == 'application/x-www-form-urlencoded':
        body = normalize_params(body, ignored_parameters)
    # Any other content type is passed through as-is
    return encode(body)
def normalize_json_body(
    original_body: Union[str, bytes], ignored_parameters: ParamList
) -> Union[str, bytes]:
    """Normalize and filter a request body with serialized JSON data"""
    # Skip trivially empty bodies ('{}', '[]') and anything over the size limit
    if not (2 < len(original_body) <= MAX_NORM_BODY_SIZE):
        return original_body

    try:
        parsed = json.loads(decode(original_body))
        filtered = filter_sort_json(parsed, ignored_parameters)
        return json.dumps(filtered)
    except (AttributeError, TypeError, ValueError):
        # If it's invalid JSON (or unsortable), then don't mess with it
        logger.debug('Invalid JSON body')
        return original_body
def normalize_params(value: Union[str, bytes], ignored_parameters: ParamList = None) -> str:
    """Normalize and filter urlencoded params from either a URL or request body with form data"""
    decoded = decode(value)
    pairs = filter_sort_multidict(parse_qsl(decoded), ignored_parameters)
    query_str = urlencode(pairs)

    # parse_qsl doesn't handle key-only params (e.g. '?flag'), so re-add those here
    keys_without_values = sorted(k for k in decoded.split('&') if k and '=' not in k)
    if keys_without_values:
        extra = '&'.join(keys_without_values)
        query_str = f'{query_str}&{extra}' if query_str else extra
    return query_str
def redact_response(response: CachedResponse, ignored_parameters: ParamList) -> CachedResponse:
    """Redact any ignored parameters (potentially containing sensitive info) from a cached request"""
    if not ignored_parameters:
        return response
    response.url = filter_url(response.url, ignored_parameters)
    response.request = normalize_request(response.request, ignored_parameters)  # type: ignore
    return response
def filter_sort_json(data: Union[List, Mapping], ignored_parameters: ParamList):
    """Filter and sort a JSON root object, which may be either a dict or a list"""
    if isinstance(data, Mapping):
        return filter_sort_dict(data, ignored_parameters)
    return filter_sort_list(data, ignored_parameters)
def filter_sort_dict(
    data: Mapping[str, str], ignored_parameters: ParamList = None
) -> Dict[str, str]:
    """Sort a dict by key, replacing the value of any ignored parameter with ``'REDACTED'``.

    Ignored parameters have their values replaced instead of being removed, so the cache key
    still matches whether the parameter was present or not.
    """
    ignored = set(ignored_parameters or [])
    result = {}
    for key, value in sorted(data.items()):
        result[key] = 'REDACTED' if key in ignored else value
    return result
def filter_sort_multidict(data: KVList, ignored_parameters: ParamList = None) -> KVList:
    """Sort a list of key-value pairs, replacing the value of any ignored parameter with
    ``'REDACTED'`` (so the cache key matches whether the parameter was present or not)
    """
    ignored = set(ignored_parameters or [])
    result = []
    for key, value in sorted(data):
        result.append((key, 'REDACTED' if key in ignored else value))
    return result
def filter_sort_list(data: List, ignored_parameters: ParamList = None) -> List:
    """Sort a list, removing any ignored values entirely (unlike dicts, where they are redacted)"""
    ordered = sorted(data)
    if not ignored_parameters:
        return ordered
    ignored = set(ignored_parameters)
    return [item for item in ordered if item not in ignored]
def filter_url(url: str, ignored_parameters: ParamList) -> str:
    """Filter ignored parameters out of a URL"""
    # Split off the query string, filter and sort it, then reassemble into a complete URL
    tokens = urlparse(url)
    filtered_query = normalize_params(tokens.query, ignored_parameters)
    return urlunparse(
        (
            tokens.scheme,
            tokens.netloc,
            tokens.path,
            tokens.params,
            filtered_query,
            tokens.fragment,
        )
    )
from contextlib import contextmanager
from inspect import signature
from logging import getLogger
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
from urllib3 import filepost
# Generic dict of keyword arguments
KwargDict = Dict[str, Any]

logger = getLogger('requests_cache')
def chunkify(iterable: Optional[Iterable], max_size: int) -> Iterator[List]:
    """Split an iterable into chunks of a max size.

    Args:
        iterable: Items to split; ``None`` is treated as empty
        max_size: Maximum number of items per chunk
    """
    items = list(iterable or [])
    for offset in range(0, len(items), max_size):
        yield items[offset : offset + max_size]
def coalesce(*values: Any, default=None) -> Any:
    """Get the first non-``None`` value in a list of values"""
    for value in values:
        if value is not None:
            return value
    return default
def decode(value, encoding='utf-8') -> str:
    """Decode a value from bytes, if it hasn't already been.

    Note: ``PreparedRequest.body`` is always encoded in utf-8.
    """
    if not value:
        return ''
    if isinstance(value, bytes):
        return value.decode(encoding)
    return value
def encode(value, encoding='utf-8') -> bytes:
    """Encode a value to bytes, if it hasn't already been"""
    if not value:
        return b''
    if isinstance(value, bytes):
        return value
    # Non-string values (ints, bools, etc.) are stringified first
    return str(value).encode(encoding)
def get_placeholder_class(original_exception: Optional[Exception] = None):
    """Create a placeholder type for a class that does not have dependencies installed.
    This allows delaying ImportErrors until init time, rather than at import time.

    Args:
        original_exception: Exception to raise when the placeholder is used; defaults to a
            generic ``ImportError``
    """

    def _raise_error():
        # Raised lazily: only when the placeholder is actually constructed or used
        msg = 'Dependencies are not installed for this feature'
        logger.error(msg)
        raise original_exception or ImportError(msg)

    class Placeholder:
        def __init__(self, *args, **kwargs):
            _raise_error()

        def dumps(self, *args, **kwargs):
            _raise_error()

        def loads(self, *args, **kwargs):
            _raise_error()

    return Placeholder
def get_valid_kwargs(
    func: Callable, kwargs: Dict, extras: Optional[Iterable[str]] = None
) -> KwargDict:
    """Get the subset of non-None ``kwargs`` that are valid arguments for ``func``"""
    valid, _ = split_kwargs(func, kwargs, extras)
    return {key: value for key, value in valid.items() if value is not None}
@contextmanager
def patch_form_boundary():
    """If the ``files`` param is present, patch the form boundary used to separate multipart
    uploads. ``requests`` does not provide a way to pass a custom boundary to urllib3, so this just
    monkey-patches it instead.
    """
    original_boundary = filepost.choose_boundary
    filepost.choose_boundary = lambda: '##requests-cache-form-boundary##'
    try:
        yield
    finally:
        # Always restore the original boundary, even if the wrapped block raises; without the
        # try/finally, an exception would leave the monkey-patch installed process-wide
        filepost.choose_boundary = original_boundary
def split_kwargs(
    func: Callable, kwargs: Dict, extras: Optional[Iterable[str]] = None
) -> Tuple[KwargDict, KwargDict]:
    """Split ``kwargs`` into two dicts: those that are valid arguments for ``func``, and those that
    are not

    Args:
        func: Function whose signature determines which kwargs are valid
        kwargs: Keyword arguments to split
        extras: Additional argument names to treat as valid
    """
    # Use a set for O(1) membership tests, and a single pass instead of two dict comprehensions
    params = set(signature(func).parameters)
    params.update(extras or [])
    valid_kwargs: Dict = {}
    invalid_kwargs: Dict = {}
    for k, v in kwargs.items():
        (valid_kwargs if k in params else invalid_kwargs)[k] = v
    return valid_kwargs, invalid_kwargs
def try_int(value: Any) -> Optional[int]:
    """Convert a value to an int, if possible, otherwise ``None``"""
    try:
        return int(value)
    except (TypeError, ValueError):
        # Not a numeric value (e.g. None, or a non-numeric string)
        return None
from typing import Dict, Iterable, Union
from attr import define, field
from .._utils import get_valid_kwargs
from ..models import RichMixin
from . import ExpirationPattern, ExpirationTime, FilterCallback, KeyCallback
# All HTTP methods, for use when caching should apply to every request type
ALL_METHODS = ('GET', 'HEAD', 'OPTIONS', 'POST', 'PUT', 'PATCH', 'DELETE')
DEFAULT_CACHE_NAME = 'http_cache'
# Only GET and HEAD requests are cached by default
DEFAULT_METHODS = ('GET', 'HEAD')
# Only successful (200) responses are cached by default
DEFAULT_STATUS_CODES = (200,)
# Default params and/or headers that are excluded from cache keys and redacted from cached responses
DEFAULT_IGNORED_PARAMS = ('Authorization', 'X-API-KEY', 'access_token', 'api_key')
@define(repr=False)
class CacheSettings(RichMixin):
    """Class used internally to store settings that affect caching behavior. This allows settings
    to be used across multiple modules, but exposed to the user in a single property
    (:py:attr:`.CachedSession.settings`). These values can safely be modified after initialization.
    See :py:class:`.CachedSession` and :ref:`user-guide` for usage details.
    """

    # Which responses get cached
    allowable_codes: Iterable[int] = field(default=DEFAULT_STATUS_CODES)
    allowable_methods: Iterable[str] = field(default=DEFAULT_METHODS)
    always_revalidate: bool = field(default=None)
    cache_control: bool = field(default=False)
    disabled: bool = field(default=False)
    expire_after: ExpirationTime = field(default=None)
    filter_fn: FilterCallback = field(default=None)
    # Params/headers excluded from cache keys and redacted from cached responses
    ignored_parameters: Iterable[str] = field(default=DEFAULT_IGNORED_PARAMS)
    key_fn: KeyCallback = field(default=None)
    match_headers: Union[Iterable[str], bool] = field(default=False)
    only_if_cached: bool = field(default=False)
    stale_if_error: Union[bool, ExpirationTime] = field(default=False)
    stale_while_revalidate: Union[bool, ExpirationTime] = field(default=False)
    # attrs calls the factory per instance, so the dict default is not shared
    urls_expire_after: Dict[ExpirationPattern, ExpirationTime] = field(factory=dict)

    @classmethod
    def from_kwargs(cls, **kwargs):
        """Constructor with some additional steps:

        * Handle some deprecated argument names
        * Ignore invalid settings, for easier initialization from mixed ``**kwargs``
        """
        kwargs = cls._rename_kwargs(kwargs)
        kwargs = get_valid_kwargs(cls.__init__, kwargs)
        return cls(**kwargs)

    @staticmethod
    def _rename_kwargs(kwargs):
        """Map deprecated argument names to their current equivalents"""
        if 'old_data_on_error' in kwargs:
            kwargs['stale_if_error'] = kwargs.pop('old_data_on_error')
        if 'include_get_headers' in kwargs:
            kwargs['match_headers'] = kwargs.pop('include_get_headers')
        return kwargs
from datetime import datetime, timedelta, timezone
from email.utils import parsedate_to_datetime
from fnmatch import fnmatch
from logging import getLogger
from math import ceil
from typing import Optional
from typing import Pattern as RegexPattern
from .._utils import try_int
from . import ExpirationPattern, ExpirationPatterns, ExpirationTime
# Special expiration values that may be set by either headers or keyword args
DO_NOT_CACHE = 0x0D0E0200020704  # Per RFC 4824 (an arbitrary sentinel unlikely to be a real value)
EXPIRE_IMMEDIATELY = 0
NEVER_EXPIRE = -1

logger = getLogger(__name__)
def get_expiration_datetime(
    expire_after: ExpirationTime,
    start_time: Optional[datetime] = None,
    negative_delta: bool = False,
    ignore_invalid_httpdate: bool = False,
) -> Optional[datetime]:
    """Convert an expiration value in any supported format to an absolute datetime

    Args:
        expire_after: Seconds, timedelta, datetime, HTTP date string, or a special constant
        start_time: Base time for relative expiration values (defaults to ``datetime.utcnow()``)
        negative_delta: Subtract the relative offset instead of adding it
        ignore_invalid_httpdate: Return ``None`` for an unparseable HTTP date instead of raising
    """
    # Never expire (or do not cache, in which case expiration won't be used)
    if expire_after is None or expire_after in [NEVER_EXPIRE, DO_NOT_CACHE]:
        return None
    # Expire immediately
    if try_int(expire_after) == EXPIRE_IMMEDIATELY:
        return start_time or datetime.utcnow()

    # An HTTP date string (allowed for headers only)
    if isinstance(expire_after, str):
        parsed = _parse_http_date(expire_after)
        if parsed is None and not ignore_invalid_httpdate:
            raise ValueError(f'Invalid HTTP date: {expire_after}')
        return parsed
    # Already an absolute datetime
    if isinstance(expire_after, datetime):
        return _to_utc(expire_after)

    # Otherwise, it must be a timedelta or time in seconds
    if not isinstance(expire_after, timedelta):
        expire_after = timedelta(seconds=expire_after)
    if negative_delta:
        expire_after = -expire_after
    return (start_time or datetime.utcnow()) + expire_after
def get_expiration_seconds(expire_after: ExpirationTime) -> int:
    """Convert an expiration value in any supported format to an expiration time in seconds"""
    if expire_after == DO_NOT_CACHE:
        return DO_NOT_CACHE
    expires = get_expiration_datetime(expire_after, ignore_invalid_httpdate=True)
    if expires is None:
        return NEVER_EXPIRE
    # Round up to the next whole second
    return ceil((expires - datetime.utcnow()).total_seconds())
def get_url_expiration(
    url: Optional[str], urls_expire_after: Optional[ExpirationPatterns] = None
) -> ExpirationTime:
    """Check for a matching per-URL expiration, if any"""
    if not url:
        return None
    # Return the expiration of the first pattern that matches; insertion order wins
    for pattern, expire_after in (urls_expire_after or {}).items():
        if not _url_match(url, pattern):
            continue
        logger.debug(f'URL {url} matched pattern "{pattern}": {expire_after}')
        return expire_after
    return None
def _parse_http_date(value: str) -> Optional[datetime]:
    """Attempt to parse an HTTP (RFC 5322-compatible) timestamp; return ``None`` on failure"""
    try:
        return _to_utc(parsedate_to_datetime(value))
    except (TypeError, ValueError):
        logger.debug(f'Failed to parse timestamp: {value}')
        return None
def _to_utc(dt: datetime):
    """All internal datetimes are UTC and timezone-naive. Convert any user/header-provided
    datetimes to the same format.
    """
    if not dt.tzinfo:
        return dt
    # Shift aware datetimes to UTC, then drop tzinfo to make them naive
    return dt.astimezone(timezone.utc).replace(tzinfo=None)
def _url_match(url: str, pattern: ExpirationPattern) -> bool:
    """Determine if a URL matches a pattern

    Args:
        url: URL to test. Its base URL (without protocol) will be used.
        pattern: Glob pattern or compiled regex to match against. For glob patterns, a recursive
            wildcard is appended if not already present

    Example:
        >>> _url_match('https://httpbin.org/delay/1', 'httpbin.org/delay')
        True
        >>> _url_match('https://httpbin.org/stream/1', 'httpbin.org/*/1')
        True
        >>> _url_match('https://httpbin.org/stream/2', 'httpbin.org/*/1')
        False
        >>> _url_match('https://httpbin.org/stream/2', re.compile('httpbin.org/*/\\d+'))
        True
        >>> _url_match('https://httpbin.org/stream/x', re.compile('httpbin.org/*/\\d+'))
        False
    """
    # Compiled regex patterns are matched against the full URL as-is
    if isinstance(pattern, RegexPattern):
        return pattern.search(url) is not None

    # Glob patterns: strip the protocol from both sides and append a recursive wildcard
    base_url = url.split('://')[-1]
    glob_pattern = pattern.split('://')[-1].rstrip('*') + '**'
    return fnmatch(base_url, glob_pattern)
from datetime import datetime, timedelta
from logging import DEBUG, getLogger
from typing import TYPE_CHECKING, Dict, List, MutableMapping, Optional, Union
from attr import define, field
from requests import PreparedRequest, Response
from .._utils import coalesce
from ..cache_keys import normalize_headers
from ..models import RichMixin
from . import (
DO_NOT_CACHE,
EXPIRE_IMMEDIATELY,
NEVER_EXPIRE,
CacheDirectives,
ExpirationTime,
KeyCallback,
get_expiration_datetime,
get_expiration_seconds,
get_url_expiration,
)
from .settings import CacheSettings
if TYPE_CHECKING:
    # Imported only for type annotations; not needed at runtime
    from ..models import CachedResponse

logger = getLogger(__name__)
@define(repr=False)
class CacheActions(RichMixin):
    """Translates cache settings and headers into specific actions to take for a given cache item.
    The resulting actions are then handled in :py:meth:`CachedSession.send`.

    .. rubric:: Notes

    * See :ref:`precedence` for behavior if multiple sources provide an expiration
    * See :ref:`headers` for more details about header behavior
    * The following arguments/properties are the outputs of this class:

    Args:
        cache_key: The cache key created based on the initial request
        error_504: Indicates the request cannot be fulfilled based on cache settings
        expire_after: User or header-provided expiration value
        send_request: Send a new request
        resend_request: Send a new request to refresh a stale cache item
        resend_async: Return a stale cache item, and send a non-blocking request to refresh it
        skip_read: Skip reading from the cache
        skip_write: Skip writing to the cache
    """

    # Outputs
    cache_key: str = field(default=None, repr=False)
    error_504: bool = field(default=False)
    expire_after: ExpirationTime = field(default=None)
    send_request: bool = field(default=False)
    resend_request: bool = field(default=False)
    resend_async: bool = field(default=False)
    skip_read: bool = field(default=False)
    skip_write: bool = field(default=False)

    # Inputs
    _directives: CacheDirectives = field(default=None, repr=False)
    _settings: CacheSettings = field(default=None, repr=False)

    # Temporary attributes
    _only_if_cached: bool = field(default=False, repr=False)
    _refresh: bool = field(default=False, repr=False)
    _request: PreparedRequest = field(default=None, repr=False)
    _stale_if_error: Union[bool, ExpirationTime] = field(default=None, repr=False)
    _stale_while_revalidate: Union[bool, ExpirationTime] = field(default=None, repr=False)
    _validation_headers: Dict[str, str] = field(factory=dict, repr=False)

    @classmethod
    def from_request(
        cls, cache_key: str, request: PreparedRequest, settings: Optional[CacheSettings] = None
    ):
        """Initialize from request info and cache settings.

        Note on refreshing: `must-revalidate` isn't a standard request header, but is used here to
        indicate a user-requested refresh. Typically that's only used in response headers, and
        `max-age=0` would be used by a client to request a refresh. However, this would conflict
        with the `expire_after` option provided in :py:meth:`.CachedSession.request`.

        Args:
            cache_key: Cache key created from the initial request
            request: The outgoing request
            settings: Session-level cache settings
        """
        settings = settings or CacheSettings()
        directives = CacheDirectives.from_headers(request.headers)
        logger.debug(f'Cache directives from request headers: {directives}')

        # Merge values that may come from either settings or headers
        only_if_cached = settings.only_if_cached or directives.only_if_cached
        refresh = directives.max_age == EXPIRE_IMMEDIATELY or directives.must_revalidate
        stale_if_error = settings.stale_if_error or directives.stale_if_error
        stale_while_revalidate = (
            settings.stale_while_revalidate or directives.stale_while_revalidate
        )

        # Check expiration values in order of precedence:
        # request headers > per-URL patterns > session-level setting
        expire_after = coalesce(
            directives.max_age,
            get_url_expiration(request.url, settings.urls_expire_after),
            settings.expire_after,
        )

        # Check and log conditions for reading from the cache
        read_criteria = {
            'disabled cache': settings.disabled,
            'disabled method': str(request.method) not in settings.allowable_methods,
            'disabled by headers or refresh': directives.no_cache or directives.no_store,
            'disabled by expiration': expire_after == DO_NOT_CACHE,
        }
        _log_cache_criteria('read', read_criteria)

        actions = cls(
            cache_key=cache_key,
            directives=directives,
            expire_after=expire_after,
            only_if_cached=only_if_cached,
            refresh=refresh,
            request=request,
            settings=settings,
            skip_read=any(read_criteria.values()),
            skip_write=directives.no_store,
            stale_if_error=stale_if_error,
            stale_while_revalidate=stale_while_revalidate,
        )
        return actions

    @property
    def expires(self) -> Optional[datetime]:
        """Convert the user/header-provided expiration value to a datetime. Applies to new cached
        responses, and previously cached responses that are being revalidated.
        """
        return get_expiration_datetime(self.expire_after)

    def is_usable(self, cached_response: Optional['CachedResponse'], error: bool = False) -> bool:
        """Determine whether a given cached response is "fresh enough" to satisfy the request,
        based on:

        * min-fresh
        * max-stale
        * stale-if-error (if an error has occurred)
        * stale-while-revalidate
        """
        if cached_response is None:
            return False
        # Always usable: a response that never expires, or a stale response explicitly allowed
        # by a boolean stale_while_revalidate/stale_if_error setting
        elif (
            cached_response.expires is None
            or (cached_response.is_expired and self._stale_while_revalidate is True)
            or (error and self._stale_if_error is True)
        ):
            return True
        # Handle stale_if_error as a time value
        elif error and self._stale_if_error:
            offset = timedelta(seconds=get_expiration_seconds(self._stale_if_error))
        # Handle stale_while_revalidate as a time value
        elif cached_response.is_expired and self._stale_while_revalidate:
            offset = timedelta(seconds=get_expiration_seconds(self._stale_while_revalidate))
        # Handle min-fresh and max-stale
        else:
            offset = self._directives.get_expire_offset()

        # Usable if the expiration time, plus any allowed offset, is still in the future
        return datetime.utcnow() < cached_response.expires + offset

    def update_from_cached_response(
        self,
        cached_response: Optional['CachedResponse'],
        create_key: Optional[KeyCallback] = None,
        **key_kwargs,
    ):
        """Determine if we can reuse a cached response, or set headers for a conditional request
        if possible.

        Used after fetching a cached response, but before potentially sending a new request.

        Args:
            cached_response: Cached response to examine
            create_key: Cache key function, used for validating ``Vary`` headers
            key_kwargs: Additional keyword arguments for ``create_key``.
        """
        usable_response = self.is_usable(cached_response)
        usable_if_error = self.is_usable(cached_response, error=True)

        # Can't satisfy the request
        if not usable_response and self._only_if_cached and not usable_if_error:
            self.error_504 = True
        # Send the request for the first time
        elif cached_response is None:
            self.send_request = True
        # If response contains Vary and doesn't match, consider it a cache miss
        elif create_key and not self._validate_vary(cached_response, create_key, **key_kwargs):
            self.send_request = True
        # Resend the request, unless settings permit a stale response
        elif not usable_response and not (self._only_if_cached and usable_if_error):
            self.resend_request = True
        # Resend the request in the background; meanwhile return stale response
        elif cached_response.is_expired and usable_response and self._stale_while_revalidate:
            self.resend_async = True

        if cached_response is not None and not self._only_if_cached:
            self._update_validation_headers(cached_response)
        logger.debug(f'Post-read cache actions: {self}')

    def update_from_response(self, response: Response):
        """Update expiration + actions based on headers and other details from a new response.

        Used after receiving a new response, but before saving it to the cache.
        """
        directives = CacheDirectives.from_headers(response.headers)
        if self._settings.cache_control:
            self._update_from_response_headers(directives)

        # If "expired" but there's a validator, save it to the cache and revalidate on use
        skip_stale = self.expire_after == EXPIRE_IMMEDIATELY and not directives.has_validator
        do_not_cache = self.expire_after == DO_NOT_CACHE

        # Apply filter callback, if any
        callback = self._settings.filter_fn
        filtered_out = callback is not None and not callback(response)

        # Check and log conditions for writing to the cache
        write_criteria = {
            'disabled cache': self._settings.disabled,
            'disabled method': str(response.request.method) not in self._settings.allowable_methods,
            'disabled status': response.status_code not in self._settings.allowable_codes,
            'disabled by filter': filtered_out,
            'disabled by headers': self.skip_write,
            'disabled by expiration': do_not_cache or skip_stale,
        }
        self.skip_write = any(write_criteria.values())
        _log_cache_criteria('write', write_criteria)

    def update_request(self, request: PreparedRequest) -> PreparedRequest:
        """Apply validation headers (if any) before sending a request"""
        request.headers.update(self._validation_headers)
        return request

    def update_revalidated_response(
        self, response: Response, cached_response: 'CachedResponse'
    ) -> 'CachedResponse':
        """After revalidation, update the cached response's expiration and headers"""
        logger.debug(f'Response for URL {response.request.url} has not been modified')
        # Skip updating the cached response if expiration and headers are unchanged
        # Ignore validators missing from new response, since they may be omitted
        headers_changed = any(
            cached_response.headers.get(k) != v for k, v in response.headers.items()
        )
        self.skip_write = self.expires == cached_response.expires and not headers_changed

        cached_response.expires = self.expires
        cached_response.headers.update(response.headers)
        cached_response.revalidated = True
        return cached_response

    def _update_from_response_headers(self, directives: CacheDirectives):
        """Check response headers for expiration and other cache directives"""
        logger.debug(f'Cache directives from response headers: {directives}')

        self._stale_if_error = self._stale_if_error or directives.stale_if_error
        if directives.immutable:
            self.expire_after = NEVER_EXPIRE
        else:
            # Response headers take precedence over any previously set expiration
            self.expire_after = coalesce(
                directives.max_age,
                directives.expires,
                self.expire_after,
            )
        self.skip_write = self.skip_write or directives.no_store

    def _update_validation_headers(self, cached_response: 'CachedResponse'):
        """If needed, get validation headers based on a cached response. Revalidation may be
        triggered by a stale response, request headers, or cached response headers.
        """
        directives = CacheDirectives.from_headers(cached_response.headers)
        # These conditions always apply
        revalidate = directives.has_validator and (
            cached_response.is_expired or self._refresh or self._settings.always_revalidate
        )
        # These conditions only apply if cache_control=True
        cc_revalidate = self._settings.cache_control and (
            directives.no_cache or directives.must_revalidate
        )

        # Add the appropriate validation headers, if needed
        if revalidate or cc_revalidate:
            if directives.etag:
                self._validation_headers['If-None-Match'] = directives.etag
            if directives.last_modified:
                self._validation_headers['If-Modified-Since'] = directives.last_modified
            # A conditional request replaces a plain resend
            self.send_request = True
            self.resend_request = False

    def _validate_vary(
        self, cached_response: 'CachedResponse', create_key: KeyCallback, **key_kwargs
    ) -> bool:
        """If the cached response contains Vary, check that the specified request headers match"""
        vary = cached_response.headers.get('Vary')
        if not vary:
            return True
        elif vary == '*':
            # 'Vary: *' means the response can never be reused for another request
            return False

        # Generate a secondary cache key based on Vary for both the cached request and new request.
        # If there are redirects, compare the new request against the last request in the chain.
        key_kwargs['match_headers'] = [k.strip() for k in vary.split(',')]
        vary_request = (
            cached_response.history[-1].request
            if cached_response.history
            else cached_response.request
        )
        vary_cache_key = create_key(vary_request, **key_kwargs)
        headers_match = create_key(self._request, **key_kwargs) == vary_cache_key
        if not headers_match:
            _log_vary_diff(
                self._request.headers, cached_response.request.headers, key_kwargs['match_headers']
            )
        return headers_match
def _log_vary_diff(
    headers_1: MutableMapping[str, str], headers_2: MutableMapping[str, str], vary: List[str]
):
    """Log which specific headers specified by Vary did not match"""
    # Skip the extra normalization work unless debug logging is enabled
    if logger.level > DEBUG:
        return
    norm_1 = normalize_headers(headers_1)
    norm_2 = normalize_headers(headers_2)
    mismatched = [header for header in vary if norm_1.get(header) != norm_2.get(header)]
    logger.debug(f'Failed Vary check. Non-matching headers: {", ".join(mismatched)}')
def _log_cache_criteria(operation: str, criteria: Dict):
    """Log details on any failed checks for cache read or write"""
    # Skip building the message unless debug logging is enabled
    if logger.level > DEBUG:
        return
    failed = [k for k, v in criteria.items() if v]
    status = ', '.join(failed) if failed else 'Passed'
    logger.debug(f'Pre-{operation} cache checks: {status}')
from datetime import datetime, timedelta
from decimal import Decimal
from json import JSONDecodeError
from typing import Callable, Dict, ForwardRef, MutableMapping, Optional
from cattr import Converter
from requests.cookies import RequestsCookieJar, cookiejar_from_dict
from requests.exceptions import RequestException
from requests.structures import CaseInsensitiveDict
from ..models import CachedResponse, DecodedContent
from .pipeline import Stage
try:
import ujson as json
except ImportError:
import json # type: ignore
class CattrStage(Stage):
    """Base serializer class that does pre/post-processing with ``cattrs``. This can be used either
    on its own, or as a stage within a :py:class:`.SerializerPipeline`.

    Args:
        factory: A callable that returns a ``cattrs`` converter to start from instead of a new
            ``Converter``. Mainly useful for preconf converters.
        decode_content: Save response body in human-readable format, if possible

    Notes on ``decode_content`` option:

    * Response body will be decoded into a human-readable format (if possible) during
      serialization, and re-encoded during deserialization to reconstruct the original response.
    * Supported Content-Types are ``application/json`` and ``text/*``. All other types will be
      saved as-is.
    * Decoded responses are saved in a separate ``_decoded_content`` attribute, to ensure that
      ``_content`` is always binary.
    * This is the default behavior for Filesystem, DynamoDB, and MongoDB backends.
    """

    def __init__(
        self,
        factory: Optional[Callable[..., Converter]] = None,
        decode_content: bool = False,
        **kwargs
    ):
        self.converter = init_converter(factory, **kwargs)
        self.decode_content = decode_content

    def dumps(self, value: CachedResponse) -> Dict:
        """Unstructure a response into a plain dict, optionally with a decoded body"""
        # Pass through anything that isn't a CachedResponse
        if not isinstance(value, CachedResponse):
            return value
        unstructured = self.converter.unstructure(value)
        if not self.decode_content:
            return unstructured
        return _decode_content(value, unstructured)

    def loads(self, value: Dict) -> CachedResponse:
        """Structure a plain dict back into a response object, re-encoding the body if needed"""
        # Pass through anything that isn't dict-like
        if not isinstance(value, MutableMapping):
            return value
        structured = self.converter.structure(value, cl=CachedResponse)
        return _encode_content(structured)
def init_converter(
    factory: Optional[Callable[..., Converter]] = None,
    convert_datetime: bool = True,
    convert_timedelta: bool = True,
) -> Converter:
    """Make a converter to structure and unstructure nested objects within a
    :py:class:`.CachedResponse`

    Args:
        factory: An optional factory function that returns a ``cattrs`` converter
        convert_datetime: May be set to ``False`` for pre-configured converters that already have
            datetime support
        convert_timedelta: May be set to ``False`` for pre-configured converters that already have
            timedelta support
    """
    factory = factory or Converter
    try:
        converter = factory(omit_if_default=True)
    # Handle previous versions of cattrs (<22.2) that don't support this argument
    except TypeError:
        converter = factory()

    # Convert datetimes to and from iso-formatted strings
    if convert_datetime:
        converter.register_unstructure_hook(datetime, lambda obj: obj.isoformat() if obj else None)
        converter.register_structure_hook(datetime, _to_datetime)

    # Convert timedeltas to and from float values in seconds
    if convert_timedelta:
        converter.register_unstructure_hook(
            timedelta, lambda obj: obj.total_seconds() if obj else None
        )
        converter.register_structure_hook(timedelta, _to_timedelta)

    # Convert dict-like objects to and from plain dicts
    converter.register_unstructure_hook(RequestsCookieJar, lambda obj: dict(obj.items()))
    converter.register_structure_hook(RequestsCookieJar, lambda obj, cls: cookiejar_from_dict(obj))
    converter.register_unstructure_hook(CaseInsensitiveDict, dict)
    converter.register_structure_hook(
        CaseInsensitiveDict, lambda obj, cls: CaseInsensitiveDict(obj)
    )

    # Convert decoded JSON body back to a string. If the object is a valid JSON root (dict or list),
    # that means it was previously saved in human-readable format due to `decode_content=True`.
    # After this hook runs, the body will also be re-encoded with `_encode_content()`.
    converter.register_structure_hook(
        DecodedContent, lambda obj, cls: json.dumps(obj) if isinstance(obj, (dict, list)) else obj
    )

    def structure_fwd_ref(obj, cls):
        # python<=3.8: ForwardRef may not have been evaluated yet
        if not cls.__forward_evaluated__:  # pragma: no cover
            cls._evaluate(globals(), locals())
        return converter.structure(obj, cls.__forward_value__)

    # Resolve forward references (required for CachedResponse.history)
    converter.register_unstructure_hook_func(
        lambda cls: cls.__class__ is ForwardRef,
        lambda obj, cls=None: converter.unstructure(obj, cls.__forward_value__ if cls else None),
    )
    converter.register_structure_hook_func(
        lambda cls: cls.__class__ is ForwardRef,
        structure_fwd_ref,
    )
    return converter
def make_decimal_timedelta_converter(**kwargs) -> Converter:
    """Make a converter that uses Decimals instead of floats to represent timedelta objects"""

    def _timedelta_to_decimal(obj):
        # Round-trip through str to preserve the float's full precision
        return Decimal(str(obj.total_seconds())) if obj else None

    converter = Converter(**kwargs)
    converter.register_unstructure_hook(timedelta, _timedelta_to_decimal)
    converter.register_structure_hook(timedelta, _to_timedelta)
    return converter
def _decode_content(response: CachedResponse, response_dict: Dict) -> Dict:
"""Decode response body into a human-readable format, if possible"""
# Decode body as JSON
if response.headers.get('Content-Type') == 'application/json':
try:
response_dict['_decoded_content'] = response.json()
response_dict.pop('_content', None)
except (JSONDecodeError, RequestException):
pass
# Decode body as text
if response.headers.get('Content-Type', '').startswith('text/'):
response_dict['_decoded_content'] = response.text
response_dict.pop('_content', None)
# Otherwise, it is most likely a binary body
return response_dict
def _encode_content(response: CachedResponse) -> CachedResponse:
"""Re-encode response body if saved as JSON or text (via ``decode_content=True``).
This has no effect for a binary response body.
"""
if isinstance(response._decoded_content, str):
response._content = response._decoded_content.encode('utf-8')
response._decoded_content = None
response.encoding = 'utf-8' # Set encoding explicitly so requests doesn't have to guess
response.headers['Content-Length'] = str(len(response._content)) # Size may have changed
return response
def _to_datetime(obj, cls) -> datetime:
if isinstance(obj, str):
obj = datetime.fromisoformat(obj)
return obj
def _to_timedelta(obj, cls) -> timedelta:
if isinstance(obj, (int, float)):
obj = timedelta(seconds=obj)
elif isinstance(obj, Decimal):
obj = timedelta(seconds=float(obj))
return obj | /requests_cache-1.1.0-py3-none-any.whl/requests_cache/serializers/cattrs.py | 0.884576 | 0.232169 | cattrs.py | pypi |
from typing import Any, Callable, Optional, Sequence, Union
from ..models import CachedResponse
class Stage:
    """One step of a serializer pipeline, exposing uniform ``dumps()`` and
    ``loads()`` callables.

    Args:
        obj: Serializer object or module the step delegates to, if applicable
        dumps: Serialization function, or the name of a method on ``obj``
        loads: Deserialization function, or the name of a method on ``obj``
    """

    def __init__(
        self,
        obj: Any = None,
        dumps: Union[str, Callable] = 'dumps',
        loads: Union[str, Callable] = 'loads',
    ):
        # A string argument names a method on ``obj``; anything else is used as-is.
        if isinstance(dumps, str):
            dumps = getattr(obj, dumps)
        if isinstance(loads, str):
            loads = getattr(obj, loads)
        self.obj = obj
        self.dumps = dumps
        self.loads = loads
class SerializerPipeline:
    """A chain of stages applied in order by ``dumps()`` and in reverse order
    by ``loads()`` to serialize and deserialize response objects.

    Note: Typically, the first stage should be a :py:class:`.CattrStage`, since
    it performs the non-format-specific work of unstructuring a response object
    into a dict (and vice versa).

    Args:
        stages: A sequence of :py:class:`Stage` objects, or any objects with
            ``dumps()`` and ``loads()`` methods
        is_binary: Indicates whether the serialized content is binary
    """

    def __init__(self, stages: Sequence, name: Optional[str] = None, is_binary: bool = False):
        self.is_binary = is_binary
        self.stages = stages
        self.name = name
        # Pre-resolve the per-stage callables; loading walks the stages backwards.
        self.dump_stages = [stage.dumps for stage in stages]
        self.load_stages = [stage.loads for stage in reversed(stages)]

    def dumps(self, value) -> Union[str, bytes]:
        """Pass *value* through every stage's ``dumps`` in order."""
        for dump in self.dump_stages:
            value = dump(value)
        return value

    def loads(self, value) -> CachedResponse:
        """Pass *value* through every stage's ``loads`` in reverse order."""
        for load in self.load_stages:
            value = load(value)
        return value

    def set_decode_content(self, decode_content: bool):
        """Set decode_content, if the pipeline contains a CattrStage or compatible object"""
        for stage in self.stages:
            if hasattr(stage, 'decode_content'):
                stage.decode_content = decode_content

    def __str__(self) -> str:
        return f'SerializerPipeline(name={self.name}, n_stages={len(self.dump_stages)})'
r"""
The ``codes`` object defines a mapping from common names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
items.
>>> requests.codes['temporary_redirect']
307
>>> requests.codes.teapot
418
>>> requests.codes['\o/']
200
Some codes have multiple names, and both upper- and lower-case versions of
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
``codes.okay`` all correspond to the HTTP status code 200.
"""
from .structures import LookupDict
# Map of HTTP status code -> tuple of attribute-friendly names. The first name
# in each tuple is the canonical one; the rest are aliases (including a few
# symbolic ones such as '\\o/'). Consumed by _init() below.
_codes = {
    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}
codes = LookupDict(name='status_codes')


def _init():
    """Populate ``codes`` with attribute aliases and extend the module docstring."""
    for code, names in _codes.items():
        for name in names:
            setattr(codes, name, code)
            # Symbolic aliases (e.g. '\\o/', '/o\\') get no uppercase variant.
            if not name.startswith(('\\', '/')):
                setattr(codes, name.upper(), code)

    def doc(code):
        aliases = ', '.join('``%s``' % n for n in _codes[code])
        return '* %d: %s' % (code, aliases)

    global __doc__
    if __doc__ is not None:
        __doc__ = __doc__ + '\n' + '\n'.join(doc(code) for code in sorted(_codes))


_init()
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string of the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
        for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``,
        3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple
        ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a
        string defining the content type of the given file and ``custom_headers`` a dict-like
        object containing additional headers to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable
        GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      <Response [200]>
    """
    # A throwaway Session per call, closed by the ``with`` block so no socket
    # is leaked; leaked sockets trigger ResourceWarnings and can masquerade
    # as memory leaks.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    r"""Sends a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # GET requests follow redirects unless the caller opts out.
    kwargs.setdefault('allow_redirects', True)
    return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Sends an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Redirects are followed by default for OPTIONS requests.
    kwargs.setdefault('allow_redirects', True)
    return request('options', url, **kwargs)
def head(url, **kwargs):
    r"""Sends a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike GET, HEAD does not follow redirects by default.
    kwargs.setdefault('allow_redirects', False)
    return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Sends a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    r"""Sends a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request` (forwarded via ``\*\*kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Sends a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request` (forwarded via ``\*\*kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
    r"""Sends a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes
        (e.g. ``data`` or ``json`` for a request body).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request('delete', url, **kwargs)
from .compat import OrderedDict, Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
    """A ``dict``-like mapping with case-insensitive string keys.

    Implements the full ``MutableMapping`` interface plus dict's ``copy``,
    and adds ``lower_items``.

    All keys are expected to be strings. The mapping remembers the casing of
    the *last* key set, so ``iter(instance)``, ``keys()``, ``items()``,
    ``iterkeys()``, and ``iteritems()`` yield case-sensitive keys, while
    lookups and containment tests ignore case::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` returns the value of a
    ``'Content-Encoding'`` response header regardless of how the header name
    was originally stored.

    Behavior is undefined if the constructor, ``.update``, or an equality
    comparison receives distinct keys whose ``.lower()`` forms collide.
    """

    def __init__(self, data=None, **kwargs):
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Index by the lowercased key, but remember the caller's casing by
        # storing the (original_key, value) pair.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (cased_key for cased_key, _ in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lower_key, pair[1]) for lower_key, pair in self._store.items())

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare insensitively by lowercasing both key sets.
        other_ci = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other_ci.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """A dict whose item access reads instance attributes, returning ``None``
    for unknown keys instead of raising ``KeyError``."""

    def __init__(self, name=None):
        super(LookupDict, self).__init__()
        self.name = name

    def __repr__(self):
        return "<lookup '%s'>" % (self.name)

    def __getitem__(self, key):
        # Look keys up in the attribute namespace; missing values fall
        # through to None rather than raising.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('https://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
import urllib3
import chardet
import warnings
from .exceptions import RequestsDependencyWarning
def check_compatibility(urllib3_version, chardet_version):
    """Assert that the installed urllib3 and chardet versions are supported.

    Raises ``AssertionError`` when a version is outside the supported range,
    or ``ValueError`` when a version string cannot be parsed.
    """
    parts = urllib3_version.split('.')
    assert parts != ['dev']  # Verify urllib3 isn't installed from git.

    # Sometimes, urllib3 only reports its version as 16.1 — pad to x.y.z.
    if len(parts) == 2:
        parts.append('0')

    # Check urllib3 for compatibility: >= 1.21.1, <= 1.25
    major, minor, patch = (int(p) for p in parts)  # noqa: F811
    assert major == 1
    assert 21 <= minor <= 25

    # Check chardet for compatibility: >= 3.0.2, < 3.1.0
    major, minor, patch = (int(p) for p in chardet_version.split('.')[:3])
    assert major == 3
    assert minor < 1
    assert patch >= 2
def _check_cryptography(cryptography_version):
# cryptography < 1.3.4
try:
cryptography_version = list(map(int, cryptography_version.split('.')))
except ValueError:
return
if cryptography_version < [1, 3, 4]:
warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
warnings.warn(warning, RequestsDependencyWarning)
# Check imported dependencies for compatibility.
try:
    check_compatibility(urllib3.__version__, chardet.__version__)
except (AssertionError, ValueError):
    # Unsupported or unparsable versions: warn, but keep importing anyway.
    warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported "
                  "version!".format(urllib3.__version__, chardet.__version__),
                  RequestsDependencyWarning)

# Attempt to enable urllib3's SNI support, if possible
try:
    from urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()

    # Check cryptography version
    from cryptography import __version__ as cryptography_version
    _check_cryptography(cryptography_version)
except ImportError:
    # pyOpenSSL is optional; fall back to the standard library's ssl module.
    pass

# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __build__, __author__, __author_email__, __license__
from .__version__ import __copyright__, __cake__
from . import utils
from . import packages
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True) | /requests-ccwienk-2.21.1.tar.gz/requests-ccwienk-2.21.1/requests/__init__.py | 0.696371 | 0.258303 | __init__.py | pypi |
import base64
import collections
import datetime
import hashlib
import os
import requests
import six
from cryptography.hazmat import backends as crypto_backends
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
def digester(data):
    """SHA-1 digest *data*, base64-encode the digest, and wrap the result
    at 60 characters per line."""
    if not isinstance(data, six.binary_type):
        data = data.encode('utf_8')
    encoded = base64.b64encode(hashlib.sha1(data).digest())
    if not isinstance(encoded, six.string_types):
        encoded = encoded.decode('utf_8')
    return '\n'.join(splitter(encoded, chunksize=60))
def normpath(path):
    """Return *path* with ``~`` expanded, components normalized, and made
    absolute."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(os.path.normpath(expanded))
def splitter(iterable, chunksize=60):
    """Lazily yield consecutive slices of *iterable*, each at most
    *chunksize* items long. Works on any object supporting slicing."""
    return (iterable[start:start + chunksize]
            for start in range(0, len(iterable), chunksize))
class ChefAuth(requests.auth.AuthBase): # pylint: disable=R0903
    """Sign requests with user's private key.

    Implements Chef server request signing (algorithm=sha1, version=1.0).
    See https://docs.chef.io/auth.html
    https://docs.chef.io/auth.html#header-format
    """

    # Timestamp format required by the X-Ops-Timestamp header (UTC).
    datetime_fmt = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(self, user_id, private_key):
        """Validate and store the Chef user id and RSA private key.

        :param user_id: Chef user/client name, sent as X-Ops-UserId.
        :param private_key: An :class:`RSAKey`, a cryptography RSAPrivateKey,
            or a PEM string / path to a PEM file (loaded via RSAKey.load_pem).
        :raises ValueError: if either argument is falsy.
        :raises TypeError: if user_id is not a string.
        """
        if not all((user_id, private_key)):
            raise ValueError("Authenticating to Chef server requires "
                             "both user_id and private_key.")
        if isinstance(private_key, rsa.RSAPrivateKey):
            # Wrap the raw cryptography key in our signing helper.
            private_key = RSAKey(private_key)
        elif isinstance(private_key, RSAKey):
            # good to go
            pass
        else:
            # Assume PEM content or a path to a PEM file.
            private_key = RSAKey.load_pem(private_key)
        self.private_key = private_key
        if not isinstance(user_id, six.string_types):
            raise TypeError(
                "'user_id' must be a 'str' object, not {0!r}".format(user_id))
        self.user_id = user_id

    def __repr__(self):
        """Show the auth handler object."""
        return '%s(%s)' % (type(self).__name__, self.user_id)

    def __call__(self, request):
        """Sign the outgoing request in place and return it."""
        # SHA-1 digests (base64, wrapped at 60 chars) of the body and the
        # request path; the query string is excluded from the canonical path.
        hashed_body = digester(request.body or '')
        stripped_path = request.path_url.partition('?')[0]
        hashed_path = digester(stripped_path)
        timestamp = datetime.datetime.utcnow().strftime(self.datetime_fmt)
        canonical_request = self.canonical_request(
            request.method, hashed_path, hashed_body, timestamp)
        signed = self.private_key.sign(canonical_request, b64=True)
        # The base64 signature is spread across numbered headers,
        # 60 characters per header, per the Chef header format.
        signed_chunks = splitter(signed, chunksize=60)
        signed_headers = {
            'X-Ops-Authorization-%d' % (i+1): segment
            for i, segment in enumerate(signed_chunks)
        }
        auth_headers = {
            'X-Ops-Sign': 'algorithm=sha1;version=1.0',
            'X-Ops-UserId': self.user_id,
            'X-Ops-Timestamp': timestamp,
            'X-Ops-Content-Hash': hashed_body,
        }
        auth_headers.update(signed_headers)
        request.headers.update(auth_headers)
        return request

    def canonical_request(self, method, path, content, timestamp):
        """Return the canonical request string.

        The field order below is part of the signing protocol and must not
        be changed.
        """
        request = collections.OrderedDict([
            ('Method', method.upper()),
            ('Hashed Path', path),
            ('X-Ops-Content-Hash', content),
            ('X-Ops-Timestamp', timestamp),
            ('X-Ops-UserId', self.user_id),
        ])
        return '\n'.join(['%s:%s' % (key, value)
                          for key, value in request.items()])
class RSAKey(object):
    """Requires an instance of RSAPrivateKey to initialize.

    The base class for this type is found in the crytography library
    at cryptography.hazmat.primitives.asymmetric.rsa
    """

    def __init__(self, private_key):
        """Requires an RSAPrivateKey instance.

        Key class from cryptography.hazmat.primitives.asymmetric.rsa

        :raises TypeError: if private_key is not an RSAPrivateKey.
        """
        if not isinstance(private_key, rsa.RSAPrivateKey):
            raise TypeError("private_key must be an instance of "
                            "cryptography-RSAPrivateKey.")
        self.private_key = private_key

    @classmethod
    def load_pem(cls, private_key, password=None):
        """Return a PrivateKey instance.

        :param private_key: Private key string (PEM format) or the path
                            to a local private key file.
        :param password: Optional passphrase for an encrypted key.
        """
        # TODO(sam): try to break this in tests
        # If the argument resolves to an existing file, read the PEM from it;
        # otherwise the argument itself is treated as PEM content.
        maybe_path = normpath(private_key)
        if os.path.isfile(maybe_path):
            with open(maybe_path, 'rb') as pkf:
                private_key = pkf.read()
        if not isinstance(private_key, six.binary_type):
            private_key = private_key.encode('utf-8')
        pkey = serialization.load_pem_private_key(
            private_key,
            password=password,
            backend=crypto_backends.default_backend())
        return cls(pkey)

    def sign(self, data, b64=True):
        """Sign data with the private key and return the signed data.

        The signed data will be Base64 encoded if b64 is True.
        """
        # NOTE(review): ``private_key.signer(padding, algorithm)`` is a
        # deprecated cryptography API (removed in cryptography >= 2.0 in
        # favor of ``sign()``) — confirm the pinned cryptography version
        # still provides it.
        padder = padding.PKCS1v15()
        signer = self.private_key.signer(padder, None)
        if not isinstance(data, six.binary_type):
            data = data.encode('utf_8')
        signer.update(data)
        signed = signer.finalize()
        if b64:
            signed = base64.b64encode(signed)
        return signed
from requests_cloud_auth.keystone import KeystoneV2AuthBase
# Rackspace Identity (Keystone v2) endpoints, per region.
US_ENDPOINT = 'https://identity.api.rackspacecloud.com'
UK_ENDPOINT = 'https://lon.identity.api.rackspacecloud.com'
class RackspacePasswordAuth(KeystoneV2AuthBase):
    """Requests authentication extension for Rackspace username/password
    credentials."""

    def __init__(self, username, password, endpoint=None, region='US'):
        """Build a password-based Rackspace Identity auth handler.

        :param username: Valid Rackspace Cloud username
        :param password: Valid Rackspace Cloud password
        :param endpoint: (optional) URI to override authentication endpoint
        :param region: (optional) Rackspace Cloud region; supported values
            are ``US`` and ``UK``
        :return: Instance of RackspacePasswordAuth
        """
        if not endpoint:
            # No explicit endpoint: pick the regional identity endpoint.
            endpoint = UK_ENDPOINT if region.lower() == 'uk' else US_ENDPOINT
        super(RackspacePasswordAuth, self).__init__(
            endpoint=endpoint,
            username=username,
            password=password
        )

    def get_token(self):
        """Request a token using passwordCredentials."""
        credentials = {
            'username': self.username,
            'password': self.password,
        }
        return self.get_token_from_request_body(
            {'auth': {'passwordCredentials': credentials}})
class RackspaceApiKeyAuth(KeystoneV2AuthBase):
    """Requests authentication extension for Rackspace API-key
    credentials."""

    def __init__(self, username, api_key, endpoint=None, region='US'):
        """Build an API-key-based Rackspace Identity auth handler.

        :param username: Valid Rackspace Cloud username
        :param api_key: Valid Rackspace Cloud API key
        :param endpoint: (optional) URI to override authentication endpoint
        :param region: (optional) Rackspace Cloud region; supported values
            are ``US`` and ``UK``
        :return: Instance of RackspaceApiKeyAuth
        """
        if not endpoint:
            # No explicit endpoint: pick the regional identity endpoint.
            endpoint = UK_ENDPOINT if region.lower() == 'uk' else US_ENDPOINT
        super(RackspaceApiKeyAuth, self).__init__(
            endpoint=endpoint,
            username=username,
            api_key=api_key
        )

    def get_token(self):
        """Request a token using RAX-KSKEY:apiKeyCredentials."""
        credentials = {
            'username': self.username,
            'apiKey': self.api_key,
        }
        return self.get_token_from_request_body(
            {'auth': {'RAX-KSKEY:apiKeyCredentials': credentials}})
from __future__ import absolute_import
import codecs
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
# The UTF-8 StreamWriter class from the codec registry; used below to write
# text (headers and text field data) into the binary body buffer.
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
    """
    Return a random hex string usable as a multipart boundary.

    Our embarrassingly-simple replacement for mimetools.choose_boundary.
    ``uuid`` is imported lazily because we don't want its issues
    https://bugs.python.org/issue5885
    https://bugs.python.org/issue11063
    to affect our entire library.
    """
    from uuid import uuid4
    return uuid4().hex
def iter_field_objects(fields):
    """
    Iterate over fields, normalizing each entry to a
    :class:`~urllib3.fields.RequestField`.

    Supports lists of (k, v) tuples and dicts, and lists of
    :class:`~urllib3.fields.RequestField`.
    """
    items = six.iteritems(fields) if isinstance(fields, dict) else iter(fields)

    for item in items:
        if isinstance(item, RequestField):
            yield item
        else:
            yield RequestField.from_tuples(*item)
def iter_fields(fields):
    """
    .. deprecated:: 1.6

    Iterate over fields as ``(key, value)`` pairs.

    Obsoleted by the addition of :class:`~urllib3.fields.RequestField`:
    use :func:`iter_field_objects` instead, which yields
    :class:`~urllib3.fields.RequestField` objects.

    Supports list of (k, v) tuples and dicts.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else fields
    return ((key, value) for key, value in source)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.

    :param fields:
        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
    :param boundary:
        If not specified, a random boundary is generated via
        :func:`choose_boundary`.
    :return: Tuple of ``(encoded_body_bytes, content_type_string)``.
    """
    if boundary is None:
        boundary = choose_boundary()

    buf = BytesIO()
    for field in iter_field_objects(fields):
        buf.write(b('--%s\r\n' % (boundary)))
        writer(buf).write(field.render_headers())

        data = field.data
        if isinstance(data, int):
            data = str(data)  # Backwards compatibility
        if isinstance(data, six.text_type):
            writer(buf).write(data)
        else:
            buf.write(data)

        buf.write(b'\r\n')

    buf.write(b('--%s--\r\n' % (boundary)))

    content_type = str('multipart/form-data; boundary=%s' % boundary)
    return buf.getvalue(), content_type
from ._collections import HTTPHeaderDict
# This dictionary is used to store the default ports for specific schemes to
# control whether the port is inserted into the Host header.
# (Request.add_host omits the port from Host when it matches the default.)
DEFAULT_PORTS = {"http": 80, "https": 443}
class Request(object):
    """
    The base, common, Request object.

    A *semantic* representation of a HTTP request with all its familiar
    parts: a method, a target (the path & query portions of a URI), headers,
    and optionally a body. urllib3 passes these objects between its layers,
    mutating them as needed; the low-level layers know how to send them.
    """

    def __init__(self, method, target, headers=None, body=None):
        #: The HTTP method in use. Must be a byte string.
        self.method = method
        #: The request target: that is, the path and query portions of the URI.
        self.target = target
        #: The request headers, always stored as a HTTPHeaderDict.
        self.headers = HTTPHeaderDict(headers)
        #: The request body. Allowed forms:
        #: - A byte string.
        #: - A "readable" object.
        #: - An iterable of byte strings.
        #: - A text string (not recommended, auto-encoded to UTF-8)
        self.body = body

    def add_host(self, host, port, scheme):
        """
        Prepend the Host header, unless one is already present.

        The best layer to add Host is the bottom one, but headers added there
        would land at the bottom of the header block. Proxies, caches, and
        other intermediaries want Host — which is routing information — as
        early as possible, so this method builds a fresh header dict with
        ``host`` first. An existing Host header is left untouched.
        """
        # NOTE(review): the membership test uses a bytes key (b'host');
        # confirm HTTPHeaderDict stores header names as bytes, otherwise
        # this check can never match.
        if b'host' in self.headers:
            return

        # Omit the port only when it is the scheme's default; the object()
        # sentinel forcibly includes the port for schemes we don't know.
        if port is DEFAULT_PORTS.get(scheme, object()):
            header = host
        else:
            header = "{}:{}".format(host, port)

        headers = HTTPHeaderDict(host=header)
        headers._copy_from(self.headers)
        self.headers = headers
class Response(object):
    """
    The abstract low-level Response object that urllib3 works on — a minimal
    carrier of HTTP semantics (status, headers, body, version) for processing
    by higher layers, not the helpful user-facing Response they expose.
    """

    def __init__(self, status_code, headers, body, version):
        #: The HTTP status code of the response.
        self.status_code = status_code
        #: The headers on the response, as a HTTPHeaderDict.
        self.headers = HTTPHeaderDict(headers)
        #: The response body: an iterable of bytes that *must* be iterated
        #: if the connection is to be preserved.
        self.body = body
        #: The HTTP version of the response. Stored as a bytestring.
        self.version = version

    @property
    def complete(self):
        """
        True when the response can be safely returned to the connection pool.
        """
        return self.body.complete
from __future__ import absolute_import
from .filepost import encode_multipart_formdata
from .packages import six
from .packages.six.moves.urllib.parse import urlencode
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.
    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.
    Specifically,
    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).
    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).
    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.
    Initializer parameters:
    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """
    # HTTP verbs whose ``fields`` are encoded into the URL query string;
    # every other verb sends them in the request body (see :meth:`request`).
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    def __init__(self, headers=None):
        # Default headers applied to every request unless the caller
        # supplies an explicit ``headers`` argument.
        self.headers = headers or {}
    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        encode_multipart=True,
        multipart_boundary=None,
        **kw
    ): # Abstract
        # The actual transport is supplied by subclasses (connection
        # pools / pool managers).
        raise NotImplementedError(
            "Classes extending RequestMethods must implement "
            "their own ``urlopen`` method."
        )
    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.
        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()
        # Dispatch on the verb: query-string encoding for GET-like verbs,
        # body encoding for everything else.
        if method in self._encode_url_methods:
            return self.request_encode_url(
                method, url, fields=fields, headers=headers, **urlopen_kw
            )
        else:
            return self.request_encode_body(
                method, url, fields=fields, headers=headers, **urlopen_kw
            )
    def request_encode_url(
        self, method, url, fields=None, headers=None, **urlopen_kw
    ):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers
        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **extra_kw)
    def request_encode_body(
        self,
        method,
        url,
        fields=None,
        headers=None,
        encode_multipart=True,
        multipart_boundary=None,
        **urlopen_kw
    ):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.
        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.
        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.
        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::
            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }
        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.
        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers
        extra_kw = {'headers': {}}
        if fields:
            # ``fields`` and an explicit ``body`` are mutually exclusive.
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one."
                )
            if encode_multipart:
                body, content_type = encode_multipart_formdata(
                    fields, boundary=multipart_boundary
                )
            else:
                body, content_type = urlencode(
                    fields
                ), 'application/x-www-form-urlencoded'
            if isinstance(body, six.text_type):
                body = body.encode('utf-8')
            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}
        # NOTE(review): caller-supplied headers are merged *after* the
        # generated Content-Type, so in this code path they win -- confirm
        # against the docstring claim above.
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)
        return self.urlopen(method, url, **extra_kw)
from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file from its name.

    :param filename:
        The filename whose "Content-Type" is guessed via :mod:`mimetypes`.
    :param default:
        Returned when no "Content-Type" can be guessed, or when no
        filename is given.
    """
    if not filename:
        return default
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or default
def format_header_param(name, value):
    """
    Format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. Follows RFC 2231, as suggested
    by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    needs_escaping = any(ch in value for ch in '"\\\r\n')
    if not needs_escaping:
        candidate = '%s="%s"' % (name, value)
        try:
            candidate.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            # Non-ASCII value: fall through to RFC 2231 encoding below.
            pass
        else:
            return candidate
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    encoded = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, encoded)
class RequestField(object):
    """
    A data container for request body parameters.
    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """
    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        self.headers = {}
        if headers:
            # Copy so later mutation of this field's headers cannot affect
            # the caller's mapping (and vice versa).
            self.headers = dict(headers)
    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::
            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',
        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # Two-tuple form (filename, data): infer the MIME type.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Bare value: a plain form field, no filename / content type.
            filename = None
            content_type = None
            data = value
        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)
        return request_param
    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.
        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)
    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.
        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.
        :param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()
        for name, value in iterable:
            # ``None`` values (e.g. a missing filename) are skipped entirely.
            if value is not None:
                parts.append(self._render_part(name, value))
        return '; '.join(parts)
    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []
        # Well-known headers are emitted first, in this fixed order.
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))
        # The trailing entry terminates the header block with CRLF CRLF.
        lines.append('\r\n')
        return '\r\n'.join(lines)
    def make_multipart(
        self,
        content_disposition=None,
        content_type=None,
        content_location=None,
    ):
        """
        Makes this request field into a multipart request field.
        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.
        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        self.headers[
            'Content-Disposition'
        ] = content_disposition or 'form-data'
        # Appends '; name="..."; filename="..."': the leading '' element
        # produces the initial '; ' separator via join.
        self.headers['Content-Disposition'] += '; '.join(
            [
                '',
                self._render_parts(
                    (('name', self._name), ('filename', self._filename))
                ),
            ]
        )
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
    # NOTE(review): this is the classic Python 2 backport recipe (see the
    # `thread`/`dummy_thread` imports above); it is not intended for Python 3.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization only: build the circular sentinel and
            # the key -> link map. Re-running __init__ preserves order.
            self.__root = root = [] # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding each link's KEY slot.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Same walk as __iter__, but following PREV pointers.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            # Break the links explicitly to help reference-counting GC.
            # NOTE(review): on Python 3 dict.itervalues does not exist, so
            # this branch would bail out via AttributeError and leave the
            # link structure stale -- Python 2 only (see class note).
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v
        '''
        # ``self`` is taken positionally so a key named 'self' in **kwds works.
        if len(args) > 2:
            raise TypeError(
                'update() takes at most 2 positional '
                'arguments (%d given)' % (len(args),)
            )
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # Guard against infinite recursion when the dict contains itself.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Drop the implementation attributes (__root/__map); order is
        # reconstructed from ``items`` on unpickling.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self) == len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
# Each entry records the attempted method/url, the error raised (if any), the
# response status (if any), and the redirect target (if the retry was a redirect).
RequestHistory = namedtuple(
    'RequestHistory', ["method", "url", "error", "status", "redirect_location"]
)
class Retry(object):
    """ Retry configuration.
    Each retry attempt will create a new Retry object with updated values, so
    they can be safely reused.
    Retries can be defined as a default for a pool::
        retries = Retry(connect=5, read=2, redirect=5)
        http = PoolManager(retries=retries)
        response = http.request('GET', 'http://example.com/')
    Or per-request (which overrides the default for the pool)::
        response = http.request('GET', 'http://example.com/', retries=Retry(10))
    Retries can be disabled by passing ``False``::
        response = http.request('GET', 'http://example.com/', retries=False)
    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
    retries are disabled, in which case the causing exception will be raised.
    :param int total:
        Total number of retries to allow. Takes precedence over other counts.
        Set to ``None`` to remove this constraint and fall back on other
        counts. It's a good idea to set this to some sensibly-high value to
        account for unexpected edge cases and avoid infinite retry loops.
        Set to ``0`` to fail on the first retry.
        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
    :param int connect:
        How many connection-related errors to retry on.
        These are errors raised before the request is sent to the remote server,
        which we assume has not triggered the server to process the request.
        Set to ``0`` to fail on the first retry of this type.
    :param int read:
        How many times to retry on read errors.
        These errors are raised after the request was sent to the server, so the
        request may have side-effects.
        Set to ``0`` to fail on the first retry of this type.
    :param int redirect:
        How many redirects to perform. Limit this to avoid infinite redirect
        loops.
        A redirect is a HTTP response with a status code 301, 302, 303, 307 or
        308.
        Set to ``0`` to fail on the first retry of this type.
        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
    :param int status:
        How many times to retry on bad status codes.
        These are retries made on responses, where status code matches
        ``status_forcelist``.
        Set to ``0`` to fail on the first retry of this type.
    :param iterable method_whitelist:
        Set of uppercased HTTP method verbs that we should retry on.
        By default, we only retry on methods which are considered to be
        idempotent (multiple requests with the same parameters end with the
        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
        Set to a ``False`` value to retry on any verb.
    :param iterable status_forcelist:
        A set of integer HTTP status codes that we should force a retry on.
        A retry is initiated if the request method is in ``method_whitelist``
        and the response status code is in ``status_forcelist``.
        By default, this is disabled with ``None``.
    :param float backoff_factor:
        A backoff factor to apply between attempts after the second try
        (most errors are resolved immediately by a second try without a
        delay). urllib3 will sleep for::
            {backoff factor} * (2 ^ ({number of total retries} - 1))
        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
        for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
        than :attr:`Retry.BACKOFF_MAX`.
        By default, backoff is disabled (set to 0).
    :param bool raise_on_redirect: Whether, if the number of redirects is
        exhausted, to raise a MaxRetryError, or to return a response with a
        response code in the 3xx range.
    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
        whether we should raise an exception, or return a response,
        if status falls in ``status_forcelist`` range and retries have
        been exhausted.
    :param tuple history: The history of the request encountered during
        each call to :meth:`~Retry.increment`. The list is in the order
        the requests occurred. Each list item is of class :class:`RequestHistory`.
    :param bool respect_retry_after_header:
        Whether to respect Retry-After header on status codes defined as
        :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
    """
    DEFAULT_METHOD_WHITELIST = frozenset(
        ['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE']
    )
    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
    #: Maximum backoff time, in seconds.
    BACKOFF_MAX = 120
    def __init__(
        self,
        total=10,
        connect=None,
        read=None,
        redirect=None,
        status=None,
        method_whitelist=DEFAULT_METHOD_WHITELIST,
        status_forcelist=None,
        backoff_factor=0,
        raise_on_redirect=True,
        raise_on_status=True,
        history=None,
        respect_retry_after_header=True,
    ):
        self.total = total
        self.connect = connect
        self.read = read
        self.status = status
        # Disabling retries entirely (or just redirects) also disables
        # raising on exhausted redirects.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False
        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self.raise_on_status = raise_on_status
        self.history = history or tuple()
        self.respect_retry_after_header = respect_retry_after_header
    def new(self, **kw):
        # Clone this configuration, overriding any fields given in ``kw``.
        params = dict(
            total=self.total,
            connect=self.connect,
            read=self.read,
            redirect=self.redirect,
            status=self.status,
            method_whitelist=self.method_whitelist,
            status_forcelist=self.status_forcelist,
            backoff_factor=self.backoff_factor,
            raise_on_redirect=self.raise_on_redirect,
            raise_on_status=self.raise_on_status,
            history=self.history,
        )
        params.update(kw)
        return type(self)(**params)
    @classmethod
    def from_int(cls, retries, redirect=True, default=None):
        """ Backwards-compatibility for the old retries format."""
        if retries is None:
            retries = default if default is not None else cls.DEFAULT
        if isinstance(retries, Retry):
            return retries
        # Truthy redirect -> None (no separate limit; ``total`` governs),
        # falsy -> False (disables redirects in __init__).
        redirect = bool(redirect) and None
        new_retries = cls(retries, redirect=redirect)
        log.debug("Converted retries value: %r -> %r", retries, new_retries)
        return new_retries
    def get_backoff_time(self):
        """ Formula for computing the current backoff
        :rtype: float
        """
        # We want to consider only the last consecutive errors sequence (Ignore redirects).
        consecutive_errors_len = len(
            list(
                takewhile(
                    lambda x: x.redirect_location is None,
                    reversed(self.history),
                )
            )
        )
        if consecutive_errors_len <= 1:
            return 0
        # Exponential backoff, capped at BACKOFF_MAX.
        backoff_value = self.backoff_factor * (
            2 ** (consecutive_errors_len - 1)
        )
        return min(self.BACKOFF_MAX, backoff_value)
    def parse_retry_after(self, retry_after):
        # Retry-After is either a delta in seconds or an HTTP-date.
        # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
        if re.match(r"^\s*[0-9]+\s*$", retry_after):
            seconds = int(retry_after)
        else:
            retry_date_tuple = email.utils.parsedate(retry_after)
            if retry_date_tuple is None:
                raise InvalidHeader(
                    "Invalid Retry-After header: %s" % retry_after
                )
            retry_date = time.mktime(retry_date_tuple)
            seconds = retry_date - time.time()
        # A date in the past means no wait at all.
        if seconds < 0:
            seconds = 0
        return seconds
    def get_retry_after(self, response):
        """ Get the value of Retry-After in seconds. """
        retry_after = response.getheader("Retry-After")
        if retry_after is None:
            return None
        return self.parse_retry_after(retry_after)
    def sleep_for_retry(self, response=None):
        # Returns True if a Retry-After header was honoured and slept on.
        retry_after = self.get_retry_after(response)
        if retry_after:
            time.sleep(retry_after)
            return True
        return False
    def _sleep_backoff(self):
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)
    def sleep(self, response=None):
        """ Sleep between retry attempts.
        This method will respect a server's ``Retry-After`` response header
        and sleep the duration of the time requested. If that is not present, it
        will use an exponential backoff. By default, the backoff factor is 0 and
        this method will return immediately.
        """
        if response:
            slept = self.sleep_for_retry(response)
            if slept:
                return
        self._sleep_backoff()
    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        return isinstance(err, ConnectTimeoutError)
    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        return isinstance(err, (ReadTimeoutError, ProtocolError))
    def _is_method_retryable(self, method):
        """ Checks if a given HTTP method should be retried upon, depending if
        it is included on the method whitelist.
        """
        if self.method_whitelist and method.upper(
        ) not in self.method_whitelist:
            return False
        return True
    def is_retry(self, method, status_code, has_retry_after=False):
        """ Is this method/status code retryable? (Based on whitelists and control
        variables such as the number of total retries to allow, whether to
        respect the Retry-After header, whether this header is present, and
        whether the returned status code is on the list of status codes to
        be retried upon on the presence of the aforementioned header)
        """
        if not self._is_method_retryable(method):
            return False
        if self.status_forcelist and status_code in self.status_forcelist:
            return True
        return (
            self.total and
            self.respect_retry_after_header and
            has_retry_after and
            (status_code in self.RETRY_AFTER_STATUS_CODES)
        )
    def is_exhausted(self):
        """ Are we out of retries? """
        retry_counts = (
            self.total, self.connect, self.read, self.redirect, self.status
        )
        # ``filter(None, ...)`` drops disabled (None/False) counts; a count
        # only goes negative once :meth:`increment` decrements it past zero.
        retry_counts = list(filter(None, retry_counts))
        if not retry_counts:
            return False
        return min(retry_counts) < 0
    def increment(
        self,
        method=None,
        url=None,
        response=None,
        error=None,
        _pool=None,
        _stacktrace=None,
    ):
        """ Return a new Retry object with incremented retry counters.
        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.
        :return: A new ``Retry`` object.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)
        total = self.total
        if total is not None:
            total -= 1
        connect = self.connect
        read = self.read
        redirect = self.redirect
        status_count = self.status
        cause = 'unknown'
        status = None
        redirect_location = None
        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1
        elif error and self._is_read_error(error):
            # Read retry?
            if read is False or not self._is_method_retryable(method):
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1
        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'
            redirect_location = response.get_redirect_location()
            status = response.status
        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and a the given method is in the whitelist
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                if status_count is not None:
                    status_count -= 1
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status
                )
                status = response.status
        # Record this attempt so backoff and debugging can see it.
        history = self.history + (
            RequestHistory(method, url, error, status, redirect_location),
        )
        new_retry = self.new(
            total=total,
            connect=connect,
            read=read,
            redirect=redirect,
            status=status_count,
            history=history,
        )
        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))
        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
        return new_retry
    def __repr__(self):
        return (
            '{cls.__name__}(total={self.total}, connect={self.connect}, '
            'read={self.read}, redirect={self.redirect}, status={self.status})'
        ).format(
            cls=type(self), self=self
        )
# For backwards compatibility (equivalent to pre-v1.9): 3 total retries.
Retry.DEFAULT = Retry(3)
# requests-credssp
[](https://github.com/jborean93/requests-credssp/actions/workflows/ci.yml)
[](https://codecov.io/gh/jborean93/requests-credssp)
[](https://badge.fury.io/py/requests-credssp)
[](https://github.com/jborean93/request-credssp/blob/master/LICENSE)
## About this library
This package allows for HTTPS CredSSP authentication using the requests
library. CredSSP is a Microsoft authentication that allows your credentials to
be delegated to a server giving you double hop authentication.
## Features
This library supports the following CredSSP features
* Protocol version 2 to 6
* Initial authentication with NTLM or Kerberos
* Message encryption support using the `wrap` and `unwrap` functions
## Requirements
The following Python libraries are required;
* Python 3.6+
* [cryptography](https://github.com/pyca/cryptography)
* [pyspnego](https://github.com/jborean93/pyspnego)
* [requests>=2.0.0](https://pypi.python.org/pypi/requests)
* For Kerberos authentication on Unix [python-gssapi](https://github.com/pythongssapi/python-gssapi) and [pykrb5](https://github.com/jborean93/pykrb5) installed with `requests-credssp[kerberos]`
By default, this library can authenticate with a Windows host using NTLM
messages, if Kerberos authentication is desired, please read the below.
## Installation
To install requests-credssp, simply run
```
pip install requests-credssp
# to install the optional Kerberos functionality, run (see below)
pip install requests-credssp[kerberos]
```
### Kerberos on Linux
To add support for Kerberos authentication on a non-Windows host, the Kerberos
system headers must be installed and the `python-gssapi` library installed. To
install the Kerberos system headers you can install the following packages;
```
# Via Yum (Centos RHEL)
yum -y install python-devel krb5-devel krb5-libs krb5-workstation
# Via Dnf (Fedora)
dnf -y install python-devel krb5-devel krb5-libs krb5-workstation
# Via Apt (Ubuntu)
apt-get -y install python-dev libkrb5-dev krb5-user
# Via Portage (Gentoo)
emerge -av app-crypt/mit-krb5
emerge -av dev-python/setuptools
# Via pkg (FreeBSD)
sudo pkg install security/krb5
# Via OpenCSW (Solaris)
pkgadd -d http://get.opencsw.org/now
/opt/csw/bin/pkgutil -U
/opt/csw/bin/pkgutil -y -i libkrb5_3
# Via Pacman (Arch Linux)
pacman -S krb5
```
Once installed, the Python Kerberos libraries can be installed with
```
pip install requests-credssp[kerberos]
```
Once installed, the file `/etc/krb5.conf` should be configured so it can talk
with the Kerberos KDC.
To add proper SPNEGO support with `python-gssapi`, the
[gss-ntlmssp](https://github.com/simo5/gss-ntlmssp) should also be installed
which adds NTLM as a supported GSSAPI mechanism required for proper SPNEGO
interoperability with Windows. This package can be installed with;
```
# Via Yum (Centos RHEL) - requires epel-release
yum -y install epel-release
yum -y install gssntlmssp
# Via Dnf (Fedora)
dnf -y install gssntlmssp
# Via Apt (Ubuntu)
apt-get -y install gss-ntlmssp
# Via Pacman (Arch Linux)
pacman -S gss-ntlmssp
```
## Additional Info
The CredSSP protocol is quite complex and uses a lot of other protocols or
standards to work properly. This unfortunately means some older hosts or
settings are incompatible or require some workarounds to get working. Currently
you can configure the following settings when initialising the CredSSP class;
* `auth_mechanism`: The authentication mechanism to use initially, default is `auto`
* `disable_tlsv1_2`: Whether to disable `TLSv1.2` support and work with older protocols like `TLSv1.0`, default is `False`
* `minimum_version`: The minimum CredSSP server version that is required by the client, default is `2`
### Authentication Mechanisms
Part of the CredSSP protocol is to authenticate the user's credentials using
the SPNEGO protocol. The SPNEGO protocol is also called `Negotiate` and is
able to negotiate a common protocol between the client and the server which
can currently be either `NTLM` or `Kerberos`. Kerberos is a tricky protocol
to have set up but should be used wherever it is possible as NTLM uses older
standards that are considered broken from a security perspective.
Due to historical decisions and that Kerberos is not always available by
default, the base install of `requests-credssp` will only work with `NTLM`.
When the Kerberos packages are installed and configured, `requests-credssp`
will automatically attempt to use `Kerberos` if possible but fall back to
`NTLM` if it fails like it would with `SPNEGO`. If you wish to force either
`Kerberos` or `NTLM` instead of relying on the `SPNEGO` mechanism, you can set
`auth_mechanism=<auth_mech>` when creating `HttpCredSSPAuth` like so;
```
import requests
from requests_credssp import HttpCredSSPAuth
# use SPNEGO (default if omitted)
credssp_auth = HttpCredSSPAuth('domain\\user', 'password',
auth_mechanism='auto')
# only allow Kerberos
credssp_auth = HttpCredSSPAuth('user@REALM.COM', 'password',
auth_mechanism='kerberos')
# only allow NTLM
credssp_auth = HttpCredSSPAuth('domain\\user', 'password',
auth_mechanism='ntlm')
r = requests.get("https://server:5986/wsman", auth=credssp_auth)
```
### TLS Protocol Versions
As CredSSP uses TLS to encrypt the tokens that are transferred between the
client and the server, it is susceptible to differing implementations of SSL.
By default, `requests-credssp` will work with servers that offer TLSv1.2,
but older Windows hosts that do not support this newer protocol version will
fail to connect.
TLSv1.2 was added in Windows Server 2012 and Windows 8 where older hosts need
an optional update to be installed for it to work. If this update cannot be
installed or you are willing to accept the risks of using the older TLS
protocols, `requests-credssp` can be set to disable TLSv1.2 and work with
older protocols like so;
```python
import requests
from requests_credssp import HttpCredSSPAuth
credssp_auth = HttpCredSSPAuth('domain\\user', 'password', disable_tlsv1_2=True)
r = requests.get("https://server:5986/wsman", auth=credssp_auth)
```
### CredSSP Protocol Versions
Recently Microsoft has released a security update to CredSSP to mitigate
[CVE 2018-0886](https://support.microsoft.com/en-us/help/4093492/credssp-updates-for-cve-2018-0886-march-13-2018).
The update added 2 new CredSSP protocol versions, `5` and `6` which changes
the way the client and server authenticate each other. While these changes are
transparent to someone who uses this library, it may be prudent to set the
minimum version that this client would authenticate with. This means that any
older servers that have not been patched for this vulnerability will be
rejected.
To set a minimum protocol version that will only allow servers that have been
patched for `CVE 2018-0886`, set `minimum_version=5` when creating
`HttpCredSSPAuth` like so;
```
import requests
from requests_credssp import HttpCredSSPAuth
credssp_auth = HttpCredSSPAuth('domain\\user', 'password', minimum_version=5)
r = requests.get("https://server:5986/wsman", auth=credssp_auth)
```
### Message Encryption
You can use this library to encrypt and decrypt messages sent to and from the
server. Message encryption is done over the TLS channel that was negotiated in
the authentication stage. The below is an example of encrypting and decrypting
messages, note this is only a basic example and not a working script and the
actual implementation depends on the protocol that is reading the messages.
```python
import requests
from requests_credssp import HttpCredSSPAuth
# build the auth request and sent an empty message to authenticate
hostname = "server"
session = requests.Session()
session.auth = HttpCredSSPAuth('domain\\user', 'password')
request = requests.Request('POST', "https://%s:5986/wsman" % hostname, data=None)
prepared_request = session.prepare_request(request)
response = session.send(prepared_request)
context = session.auth.contexts[hostname]
# encrypt the message using the wrap command
message = b'hi server'
encrypted_message = context.wrap(message)
# send the encrypted message and get the encrypted response
request = requests.Request('POST', 'https://server:5986/wsman', data=encrypted_message)
prepared_request = session.prepare_request(request)
response = session.send(prepared_request)
# decrypt the encrypted response from the server
encrypted_response = response.content
decrypted_response = context.unwrap(encrypted_response)
```
## Logging
This library uses the standard Python logging facilities. Log messages are
logged to the `requests_credssp` and `requests_credssp.credssp` named loggers.
If you are receiving any errors or wish to debug the CredSSP process you should
enable DEBUG level logs. These logs show fine grain information such as the
protocol and cipher negotiated and each CredSSP token used in the
authentication process.
## Backlog
* Add support for different credential types like smart card and redirected credentials
| /requests-credssp-2.0.0.tar.gz/requests-credssp-2.0.0/README.md | 0.489259 | 0.850002 | README.md | pypi |
import http
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from requests_toolbelt.utils import dump
import requests
class TimeoutHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that applies a default timeout to every request.

    Attributes
    ----------
    TIMEOUT_DEFAULT : int or float
        Timeout applied to all requests, in seconds.

    Parameters
    ----------
    *args
        Variable length argument list, forwarded to ``HTTPAdapter``.
    **kwargs
        Arbitrary keyword arguments; an optional ``timeout`` entry
        overrides ``TIMEOUT_DEFAULT`` and is not forwarded.
    """

    TIMEOUT_DEFAULT = 5

    def __init__(self, *args, **kwargs):
        # Consume the optional ``timeout`` kwarg; HTTPAdapter must not see it.
        self.timeout = kwargs.pop("timeout", self.TIMEOUT_DEFAULT)
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        """Send the request, falling back to the adapter's timeout.

        Overrides ``send()`` so the default timeout is used whenever the
        caller did not provide one.

        Parameters
        ----------
        request
        **kwargs
            Arbitrary keyword arguments.
        """
        if kwargs.get("timeout") is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
class RequestsCustom:
    """Apply desired modifications to the module requests.

    Current capabilities configured:
    - Custom timeout for all requests.
    - Raise exception with certain HTTP status code responses.
    - Retry on failure.

    Attributes
    ----------
    BACKOFF_FACTOR : int
        Seconds to sleep between failed requests,
        after the second try, see _get_backoff function.
    debug_full : bool
        Activate debug of the entire HTTP lifecycle.
    debug_simple : bool
        Debug the requests with less information, see _set_debug_simple().
    METHOD_WHITELIST : list of strs
        HTTP methods to retry on. POST not included by default.
    RETRY_ATTEMPTS : int
        Total number of retry attempts to make.
    STATUS_FORCELIST
        HTTP response codes to retry on.
    TIMEOUT_DEFAULT : int or float
        Timeout to use at all requests. Seconds.

    Parameters
    ----------
    debug_simple
        See the attribute with the same name.
    debug_full
        See the attribute with the same name.
    """

    def __init__(self, debug_simple=False, debug_full=False):
        self.BACKOFF_FACTOR = 2
        self.METHOD_WHITELIST = [
            "HEAD",
            "GET",
            "PUT",
            "DELETE",
            "OPTIONS",
            "TRACE",
        ]
        self.RETRY_ATTEMPTS = 5
        # Status 408: timeout.
        self.STATUS_FORCELIST = [408, 429, 500, 502, 503, 504]
        self.TIMEOUT_DEFAULT = 5
        self._log_backoff_factor()
        # Initialize debug attributes; only one type of debug is activated.
        self.debug_simple = debug_simple
        self.debug_full = debug_full
        if debug_simple is True:
            self._set_debug_simple()
            self.debug_full = False
        elif debug_full is True:
            self.debug_full = True

    def _log_backoff_factor(self):
        """Print the computed retry back-off schedule."""
        print(
            "RequestsCustom backoff factor"
            f": {', '.join(map(str, self._get_backoff()))}"
        )

    def _get_backoff(self):
        """Calculate the seconds to wait between attempts.

        These values are calculated according to the class configuration.
        For example 2 seconds means 1s, 2s, 4s... to wait between attempts.

        Returns
        -------
        list of ints
            Seconds to wait between each attempt.

        ..https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
        """
        return [
            self.BACKOFF_FACTOR * (2 ** (attempt - 1))
            for attempt in range(self.RETRY_ATTEMPTS)
        ]

    def _set_debug_simple(self):
        """Debug requests and headers, no response body.

        The debug information will appear too when this module is
        called from another ones.
        """
        # A value greater than 0 enables debug logging.
        http.client.HTTPConnection.debuglevel = 1

    def _logging_hook(self, response, *args, **kwargs):
        """Debug the entire HTTP lifecycle.

        Parameters
        ----------
        response
        *args
        **kwargs
            Arbitrary keyword arguments.

        ..https://toolbelt.readthedocs.io/en/latest/dumputils.html
        """
        data = dump.dump_all(response)
        print(data.decode("utf-8"))

    def get_requests(self):
        """Get custom request object.

        Returns
        -------
        requests.Session
            Session with timeout, retry and status-hook behaviour applied.
        """
        # Create a custom requests session, modifying the global module throws
        # an error.  Named ``session`` (not ``http``) so it does not shadow the
        # module-level ``http`` import used by _set_debug_simple().
        session = requests.Session()
        # Raise exception if HTTP status code is 4xx or 5xx.
        assert_status_hook = (
            lambda response, *args, **kwargs: response.raise_for_status()
        )
        session.hooks["response"] = [assert_status_hook]
        # Set debug the entire HTTP lifecycle.
        if self.debug_full is True:
            session.hooks["response"] = [self._logging_hook]
        # Retry on failure.  urllib3 1.26 renamed ``method_whitelist`` to
        # ``allowed_methods`` and removed the old name in urllib3 2.0, so
        # fall back for older urllib3 releases only.
        retry_kwargs = {
            "total": self.RETRY_ATTEMPTS,
            "status_forcelist": self.STATUS_FORCELIST,
            "backoff_factor": self.BACKOFF_FACTOR,
        }
        try:
            retries = Retry(allowed_methods=self.METHOD_WHITELIST, **retry_kwargs)
        except TypeError:
            # urllib3 < 1.26 only understands the deprecated keyword.
            retries = Retry(method_whitelist=self.METHOD_WHITELIST, **retry_kwargs)
        # Mount it for both http and https usage.
        adapter = TimeoutHTTPAdapter(timeout=self.TIMEOUT_DEFAULT, max_retries=retries)
        session.mount("https://", adapter)
        session.mount("http://", adapter)
        return session
if __name__ == "__main__":
print(__doc__) | /requests_custom-0.0.6.tar.gz/requests_custom-0.0.6/requests_custom/requests_custom.py | 0.779028 | 0.155206 | requests_custom.py | pypi |
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
from requests import (
Session as _Session,
)
from .auth import HTTPECPAuth
class ECPAuthSessionMixin:
    """Mixin for `requests.Session` that installs default ECP auth.

    Session classes built with this mixin create their
    `~requests.Session.auth` attribute as an instance of the
    `~requests_ecp.HTTPECPAuth` authorisation plugin:

    .. code-block:: python

        from requests import Session
        from requests_ecp import ECPAuthSessionMixin

        class MySession(ECPAuthSessionMixin, Session):
            pass

    This can be combined with any other `~requests.Session` mixins, but
    be aware that the inheritance order decides which mixin's
    `~requests.Session.auth` attribute survives.

    See also
    --------
    requests_ecp.Session
        For a ready-made wrapped `~requests.Session`.
    """

    def __init__(
        self,
        idp=None,
        kerberos=False,
        username=None,
        password=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Install the ECP authorisation plugin as this session's default.
        ecp_auth = HTTPECPAuth(
            idp,
            kerberos=kerberos,
            username=username,
            password=password,
        )
        self.auth = ecp_auth
class Session(ECPAuthSessionMixin, _Session):
    """A `requests.Session` wrapper with default SAML/ECP authentication.

    Pass ``idp`` with the URL of the ECP endpoint on the Identity
    Provider (IdP) to open a `~requests.Session` that handles ECP
    authentication with that IdP.

    Any individual request in this session that gets redirected to a
    SAML/Shibboleth authentication page/app is intercepted by the
    `~requests_ecp.HTTPECPAuth` authorisation plugin, which then runs a
    SAML/ECP authorisation workflow automatically:

    >>> from requests_ecp import Session
    >>> with Session(idp="https://idp.example.com/SAML/SOAP/ECP") as sess:
    ...     sess.get("https://private.example.com/data")
    """

    def ecp_authenticate(self, url, endpoint=None, **kwargs):
        """Manually authenticate against the endpoint.

        This generates a shibboleth session cookie for the domain
        of the given URL, which defaults to the endpoint itself.

        Parameters
        ----------
        url : `str`
            The URL of the resource (on the Service Provider) to request.

        endpoint : `str`
            The URL of the ECP endpoint on the Identity Provider.
            If not given it will be taken from the ``auth`` attribute.

        kwargs
            Other keyword arguments are passed directly to
            :func:`requests_ecp.ecp.authenticate`.

        See also
        --------
        requests_ecp.ecp.authenticate
            For details of the ECP authentication workflow.
        """
        # ECP authentication only makes sense when the HTTPECPAuth plugin
        # is installed on this session.
        if not isinstance(self.auth, HTTPECPAuth):
            raise ValueError(
                f"Cannot execute ECP authentication with {type(self.auth)}",
            )
        return self.auth._authenticate_session(
            self,
            endpoint=endpoint,
            url=url,
            **kwargs,
        )
from requests_extra.internal.session_cache import get_cached_session
def request(method, url, **kwargs):
    r"""Construct and send a :class:`Request <Request>`.

    A session for the URL's origin is looked up (or created) via
    ``get_cached_session`` and the request is dispatched through it.

    :param method: method for the new :class:`Request` object: ``GET``,
        ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: any keyword argument accepted by
        :meth:`requests.Session.request`, e.g. ``params``, ``data``,
        ``json``, ``headers``, ``cookies``, ``files``, ``auth``,
        ``timeout``, ``allow_redirects``, ``proxies``, ``verify``,
        ``stream`` and ``cert``.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      >>> req
      <Response [200]>
    """
    cached = get_cached_session(url)
    return cached.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    r"""Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Follow redirects unless the caller said otherwise.
    if "allow_redirects" not in kwargs:
        kwargs["allow_redirects"] = True
    return request("get", url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Follow redirects unless the caller said otherwise.
    if "allow_redirects" not in kwargs:
        kwargs["allow_redirects"] = True
    return request("options", url, **kwargs)
def head(url, **kwargs):
    r"""Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes. If
        `allow_redirects` is not provided, it will be set to `False` (as
        opposed to the default :meth:`request` behavior).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Unlike the other verbs, HEAD does not follow redirects by default.
    if "allow_redirects" not in kwargs:
        kwargs["allow_redirects"] = False
    return request("head", url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Send a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("post", url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    r"""Send a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request` (passed through ``kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("put", url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Send a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the
        :class:`Request` (passed through ``kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("patch", url, data=data, **kwargs)
def delete(url, **kwargs):
    r"""Send a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("delete", url, **kwargs)
[](https://travis-ci.com/maarten-dp/requests-flask-adapter)
### Purpose
FlaskAdapter is a requests adapter intended to allow its user to call Flask app endpoints, with requests, without having to run a Flask server.
Its main uses include building integration tests and client tests without having to resort to multithreading/multiprocessing/running an instance in another shell/docker to spawn a running Flask app. In doing so, you are able to call the endpoints you wish to test with your client. It can also be used as an alternative to the Flask test_client, unlocking the well-known and well-loved interface of requests in your unittests, because god knows I love those `json.loads(res.data.decode('utf-8'))` statements in my tests.
### Using FlaskAdapter as a test client
You can swap out the flask test client for a requests interface in two ways.
The first would be to import the requests_flask_adapter session, which is basically a session subclassed from a requests Session, but allows the registering of apps.
```python
from requests_flask_adapter import Session
from my_production_code import setup_my_app
from pytest import fixture
@fixture
def session():
app = setup_my_app()
Session.register('http://my_app', app)
return Session()
def test_it_runs_my_test(session):
result = session.get("http://my_app/my_endpoint", params={'some': 'params'})
assert result.json() == {'nailed': 'it'}
```
if you don't want to or, for some reason, can't rely on the `requests_flask_adapter.Session`, you can also use the requests_flask_adapter helper function to monkey patch the requests Session. For now, it heavily depends on import order, so make sure to patch it before importing the Session for your tests.
```python
from requests_flask_adapter.helpers import patch_requests
patch_requests([
('http://patched_app', app),
('http://another_patched_app', another_app)
])
```
And in your tests you can now run code that imports the requests.Session
```python
def test_it_runs_code_that_imports_requests():
result = my_code_that_imports_requests_and_does_something()
assert result == [':ok_hand:']
```
### Using FlaskAdapter for client testing
Similarly, FlaskAdapter is very effective for testing a client that is written with requests.
And once again, without having to run a live server of your flask app.
```python
from requests_flask_adapter import Session
from my_production_code import setup_my_app, User
from my_client import Client
from pytest import fixture
@fixture
def client():
app = setup_my_app()
Session.register('http://my_app', app)
return Client(
base_url='http://my_app',
session=Session(), # monkeypatch if your client isn't accepting another session.
auth=('Scanlan', 'b3st_b4rd_Exandr!a'),
)
def test_it_gets_a_user_list(client):
users = client.users()
assert users == ['vex', 'vax']
def test_it_can_upload_a_timesheet(client):
with open('data/timesheet.xls', 'r') as fh:
client.upload_timesheet(fh)
user = User.query.get(1)
assert user.hours_worked_this_month == 8
```
### Using FlaskAdapter for cross app integration tests
And just because I need to bloat this readme a bit to validate this project, I'm throwing in "integration testing" as one of its functionalities.
Of course, these integration tests require you to have access to the source code of the flask apps you're trying to test.
So, here's an example.
Let's assume the your team owns and maintains the following codebases:
- A webshop application that's also keeping track of sales, users visited and other stats from the last hour. These stats are accessible though an endpoint in your app.
- An ETL script that periodically runs and collects realtime stats from your webshop.
- A timeseries database that stores the data extracted by your ETL script
Using the data stored in your timeseries database, you have a reporting script that you run once per month to determine peak hours, what product is most popular and during which hours, which amount of users showed interest in which products, which products are falling in and out of trending, etc.
Seeing as these codebases are still actively under construction, you want to make sure future implementations don't introduce regressions in the entire chain.
```python
from datetime import datetime, timedelta
from pytest import fixture
from requests_flask_adapter import Session
from my_webshop_app import app as feeder_app
from my_timeseries_database_app import app as timeseries_app, Series
from my_etl_project import (ETLWorker, FeederClient, TSWriter,
ConfigLoader)
from .helpers import populate_webshop
Session.register('http://feeder_app', feeder_app)
Session.register('http://timeseries_app', timeseries_app)
populate_webshop(feeder_app)
@fixture
def feeder_client():
config = ConfigLoader(location='environ')
return FeederClient(
base_url='http://feeder_app',
session=Session(),
username=config['feeder_username'],
password=config['feeder_password']
)
@fixture
def writer():
config = ConfigLoader(location='environ')
return TSWriter(
base_url='http://timeseries_app',
session=Session(),
username=config['writer_username'],
password=config['writer_password']
)
def test_it_can_go_end_to_end(feeder_client, writer):
now = datetime.now()
worker = ETLWorker(
feeder=feeder_client,
writer=writer,
)
worker.run()
result = Series.sum('my_serie_name', start=now, end=now + timedelta(days=1))
assert result == 42
```
| /requests-flask-adapter-0.1.0.tar.gz/requests-flask-adapter-0.1.0/README.md | 0.501709 | 0.799951 | README.md | pypi |
import pytz
import requests
import time as time_mod
from datetime import datetime
__title__ = "requests-forecast"
__version__ = "0.6.2"
__author__ = "Jeff Triplett"
__license__ = "BSD"
__copyright__ = "Copyright 2013-2016 Jeff Triplett"

# Dark Sky API endpoint; ``time`` is either empty or ",<unix-timestamp>"
# for time-machine requests.
DARKSKY_TEMPLATE_URI = (
    "https://api.darksky.net/forecast/{key}/{latitude},{longitude}{time}"
)
# Headers sent with every API request; gzip keeps payloads small.
DEFAULT_HEADERS = {
    "Accept-Encoding": "gzip",
}
DEFAULT_TIMEZONE = "America/New_York"
# Response keys that hold a list of alert objects.
ALERT_FIELDS = ("alerts",)
# Response keys that hold a list of data-point objects.
DATA_FIELDS = ("data",)
# Keys whose 0..1 ratio values get converted to percentages by DataPoint.
DECIMAL_FIELDS = (
    "cloudCover",
    "precipProbability",
    "humidity",
)
# Keys whose unix-timestamp values get converted to datetimes; any key
# ending in "Time" is treated the same way.
TIME_FIELDS = (
    "expires",
    "time",
)
"""
Data Point Object
Data Block Object
Alerts Array
Flags Object
"""
class DataBlock(dict):
    """Dict wrapper around a Dark Sky "data block" response section.

    Wraps any nested ``data`` list into :class:`DataPoint` objects
    (stored on ``self.data``) and exposes dict keys as attributes.
    Fixes: removed stray debug ``print()`` calls from ``__init__`` and
    guarded ``dict`` initialisation so ``DataBlock()`` / ``data=None``
    no longer depends on the falsy-data branch.
    """

    def __init__(self, data=None, timezone=None):
        # Kept as a string for backwards compatibility (``str(None)`` ->
        # "None" when no timezone is given).
        self.timezone = str(timezone)
        if data:
            for key in data.keys():
                if key in DATA_FIELDS:
                    # Wrap each raw datapoint dict in a DataPoint.
                    self.data = [
                        DataPoint(data=datapoint, timezone=timezone)
                        for datapoint in data[key]
                    ]
        super().__init__(data or {})

    def __getattr__(self, attr):
        """Expose dict keys as attributes; raise AttributeError otherwise."""
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)

    def __repr__(self):
        return "<DataBlock summary={}>".format(self["summary"])
class DataPoint(dict):
    """Dict wrapper around a Dark Sky data point (or nested block).

    On construction the raw API payload is normalised in place:
    alert/data lists are wrapped in :class:`DataPoint` objects,
    ``DECIMAL_FIELDS`` ratios become percentages, and timestamp fields
    become (optionally timezone-aware) datetimes.  Dict keys are
    exposed as attributes.  Fixes: removed stray debug ``print()``
    calls; ``__repr__`` no longer returns the unformatted literal
    ``"<DataPoint {0}>"`` (and no longer crashes on points that lack
    the keys it printed); ``DataPoint()`` with no data is safe.
    """

    def __init__(self, data=None, timezone=None):
        self.timezone = str(timezone)
        if data:
            for key in data.keys():
                if key in ALERT_FIELDS:
                    self.alerts = [
                        DataPoint(data=alert, timezone=timezone)
                        for alert in data[key]
                    ]
                elif key in DATA_FIELDS:
                    self.data = [
                        DataPoint(data=datapoint, timezone=timezone)
                        for datapoint in data[key]
                    ]
                elif key in DECIMAL_FIELDS:
                    # The API reports these as ratios in [0, 1]; expose
                    # them as percentages.
                    data[key] = float(data[key]) * float("100.0")
                elif key in TIME_FIELDS or key.endswith("Time"):
                    if timezone:
                        # Convert the unix timestamp to an aware datetime
                        # in the requested timezone.
                        tz = pytz.timezone(str(timezone))
                        utc = pytz.utc
                        ts = datetime.utcfromtimestamp(int(data[key])).replace(
                            tzinfo=utc
                        )
                        data[key] = tz.normalize(ts.astimezone(tz))
                    else:
                        data[key] = datetime.fromtimestamp(int(data[key]))
        super().__init__(data or {})

    def __getattr__(self, attr):
        """Expose dict keys as attributes; raise AttributeError otherwise."""
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)

    def __repr__(self):
        if "summary" in self:
            return f"<DataPoint summary={self.summary}>"
        # No summary available: fall back to the raw dict contents.
        return f"<DataPoint {dict.__repr__(self)}>"
class ParsedAlert:
    """Weather alert built from a plain dict of API attributes."""

    def __init__(self, parser):
        """Store a reference to the parser that produced this alert."""
        super().__init__()
        self._parser = parser

    @classmethod
    def from_dict(cls, d, parser):
        """Build a :class:`ParsedAlert` from a mapping.

        :param d: mapping of alert attributes returned by the API;
            each key/value pair becomes an instance attribute.
        :param parser: parser instance that produced the data.
        :return: the populated :class:`ParsedAlert`.
        """
        # The new ParsedAlert.
        p = cls(parser=parser)
        # ``dict.iteritems`` was removed in Python 3 -- use ``items``.
        for key, value in d.items():
            setattr(p, key, value)
        # Update Datetimes...
        return p
class ParsedCurrently:
    """Container for a parsed "currently" forecast section."""

    def __init__(self, parser):
        """Keep a reference to the parser that produced this section."""
        super().__init__()
        self._parser = parser
class ParsedDaily:
    """Container for a parsed "daily" forecast section."""

    def __init__(self, parser):
        """Keep a reference to the parser that produced this section."""
        super().__init__()
        self._parser = parser
class ParsedHourly:
    """Container for a parsed "hourly" forecast section."""

    def __init__(self, parser):
        """Keep a reference to the parser that produced this section."""
        super().__init__()
        self._parser = parser
class ParsedMinutely:
    """Container for a parsed "minutely" forecast section."""

    def __init__(self, parser):
        """Keep a reference to the parser that produced this section."""
        super().__init__()
        self._parser = parser
class Forecast:
    """Client for the Dark Sky forecast API.

    Fetches the forecast for a location on construction and exposes the
    response sections (``currently``, ``daily``, ``hourly``,
    ``minutely``, ``alerts``) as properties.

    Parameters
    ----------
    apikey : str
        Dark Sky API key; required.
    latitude, longitude : float
        Coordinates of the location to forecast.
    timezone : str
        Currently unused; the timezone is taken from the API response
        (see the ``timezone`` property) instead.
    time : datetime.datetime
        Optional time for a "time machine" request.
    **kwargs
        Optional API parameters: ``exclude``, ``extend``, ``lang``
        (default ``"en"``) and ``units`` (default ``"auto"``).

    Raises
    ------
    ValueError
        If ``apikey`` is empty.
    """

    def __init__(
        self, apikey, latitude=None, longitude=None, timezone=None, time=None, **kwargs
    ):
        self.apikey = apikey
        self.latitude = latitude
        self.longitude = longitude
        self.time = time
        # Optional Dark Sky query parameters.
        self.exclude = kwargs.get("exclude", None)
        self.extend = kwargs.get("extend", None)
        self.lang = kwargs.get("lang", "en")
        self.units = kwargs.get("units", "auto")
        if not self.apikey:
            raise ValueError("No API key is set")
        self.fetch()

    @property
    def params(self):
        """Query-string parameters sent with every API request."""
        return {
            "exclude": self.exclude,
            "extend": self.extend,
            "lang": self.lang,
            "units": self.units,
        }

    def fetch(self, latitude=None, longitude=None, time=None, units=None):
        """Fetch (or re-fetch) the forecast from the API.

        Arguments override the values given at construction time.
        Stores the decoded JSON response on ``self.forecast`` and
        returns it.
        """
        # Fall back to the constructor values.  The previous
        # implementation interpolated the *local* ``time`` while testing
        # ``self.time``, producing ",None" in the URL whenever only
        # ``self.time`` was set (which __init__'s own fetch() triggered).
        time = time or self.time
        if time:
            time = int(time_mod.mktime(time.timetuple()))
        # Honour a per-call ``units`` override (previously ignored).
        params = self.params
        if units:
            params["units"] = units
        url = DARKSKY_TEMPLATE_URI.format(
            key=self.apikey,
            latitude=latitude or self.latitude,
            longitude=longitude or self.longitude,
            time=f",{time}" if time else "",
        )
        request = requests.get(url, headers=DEFAULT_HEADERS, params=params)
        request.raise_for_status()
        self.forecast = request.json()
        return self.forecast

    @property
    def alerts(self):
        """List of active weather alerts (empty DataPoint when absent)."""
        if "alerts" in self.forecast:
            return [
                DataPoint(alert, self.timezone)
                for alert in self.forecast["alerts"]
            ]
        return DataPoint()

    @property
    def currently(self):
        """Current-conditions data point."""
        if "currently" in self.forecast:
            return DataPoint(self.forecast["currently"], self.timezone)
        return DataPoint()

    @property
    def daily(self):
        """Day-by-day forecast data block."""
        if "daily" in self.forecast:
            return DataPoint(self.forecast["daily"], self.timezone)
        return DataPoint()

    @property
    def hourly(self):
        """Hour-by-hour forecast data block."""
        if "hourly" in self.forecast:
            return DataPoint(self.forecast["hourly"], self.timezone)
        return DataPoint()

    @property
    def minutely(self):
        """Minute-by-minute forecast data block."""
        if "minutely" in self.forecast:
            return DataPoint(self.forecast["minutely"], self.timezone)
        return DataPoint()

    @property
    def timezone(self):
        """`pytz` timezone reported by the API response, or ``None``."""
        if "timezone" in self.forecast:
            return pytz.timezone(self.forecast["timezone"])
        return None
# Module-level convenience wrappers around :class:`Forecast`.  Each one
# builds a Forecast (performing the network fetch) and returns a single
# section of the response.  The duplicated construction logic of the
# five wrappers is factored into a private helper.


def _build_forecast(apikey, latitude, longitude, timezone, units, time):
    """Create a :class:`Forecast` for the given location (fetches data)."""
    return Forecast(
        apikey=apikey,
        latitude=latitude,
        longitude=longitude,
        timezone=timezone,
        time=time,
        units=units,
    )


# Alert Array
def alerts(
    apikey=None, latitude=None, longitude=None, timezone=None, units=None, time=None
):
    """Return the weather alerts for the given location."""
    return _build_forecast(apikey, latitude, longitude, timezone, units, time).alerts


# Data Point
def currently(
    apikey=None, latitude=None, longitude=None, timezone=None, units=None, time=None
):
    """Return the current-conditions data point for the given location."""
    return _build_forecast(apikey, latitude, longitude, timezone, units, time).currently


# Data Block
def daily(
    apikey=None, latitude=None, longitude=None, timezone=None, units=None, time=None
):
    """Return the daily forecast data block for the given location."""
    return _build_forecast(apikey, latitude, longitude, timezone, units, time).daily


# Data Block
def hourly(
    apikey=None, latitude=None, longitude=None, timezone=None, units=None, time=None
):
    """Return the hourly forecast data block for the given location."""
    return _build_forecast(apikey, latitude, longitude, timezone, units, time).hourly


# Data Block
def minutely(
    apikey=None, latitude=None, longitude=None, timezone=None, units=None, time=None
):
    """Return the minutely forecast data block for the given location."""
    return _build_forecast(apikey, latitude, longitude, timezone, units, time).minutely
Release History
===============
dev
---
- \[Short description of non-trivial change.\]
2.31.0 (2023-05-22)
-------------------
**Security**
- Versions of Requests between v2.3.0 and v2.30.0 are vulnerable to potential
forwarding of `Proxy-Authorization` headers to destination servers when
following HTTPS redirects.
When proxies are defined with user info (https://user:pass@proxy:8080), Requests
will construct a `Proxy-Authorization` header that is attached to the request to
authenticate with the proxy.
In cases where Requests receives a redirect response, it previously reattached
the `Proxy-Authorization` header incorrectly, resulting in the value being
sent through the tunneled connection to the destination server. Users who rely on
defining their proxy credentials in the URL are *strongly* encouraged to upgrade
to Requests 2.31.0+ to prevent unintentional leakage and rotate their proxy
credentials once the change has been fully deployed.
Users who do not use a proxy or do not supply their proxy credentials through
the user information portion of their proxy URL are not subject to this
vulnerability.
Full details can be read in our [Github Security Advisory](https://github.com/psf/requests/security/advisories/GHSA-j8r2-6x86-q33q)
and [CVE-2023-32681](https://nvd.nist.gov/vuln/detail/CVE-2023-32681).
2.30.0 (2023-05-03)
-------------------
**Dependencies**
- ⚠️ Added support for urllib3 2.0. ⚠️
This may contain minor breaking changes so we advise careful testing and
reviewing https://urllib3.readthedocs.io/en/latest/v2-migration-guide.html
prior to upgrading.
Users who wish to stay on urllib3 1.x can pin to `urllib3<2`.
2.29.0 (2023-04-26)
-------------------
**Improvements**
- Requests now defers chunked requests to the urllib3 implementation to improve
standardization. (#6226)
- Requests relaxes header component requirements to support bytes/str subclasses. (#6356)
2.28.2 (2023-01-12)
-------------------
**Dependencies**
- Requests now supports charset\_normalizer 3.x. (#6261)
**Bugfixes**
- Updated MissingSchema exception to suggest https scheme rather than http. (#6188)
2.28.1 (2022-06-29)
-------------------
**Improvements**
- Speed optimization in `iter_content` with transition to `yield from`. (#6170)
**Dependencies**
- Added support for chardet 5.0.0 (#6179)
- Added support for charset-normalizer 2.1.0 (#6169)
2.28.0 (2022-06-09)
-------------------
**Deprecations**
- ⚠️ Requests has officially dropped support for Python 2.7. ⚠️ (#6091)
- Requests has officially dropped support for Python 3.6 (including pypy3.6). (#6091)
**Improvements**
- Wrap JSON parsing issues in Request's JSONDecodeError for payloads without
an encoding to make `json()` API consistent. (#6097)
- Parse header components consistently, raising an InvalidHeader error in
all invalid cases. (#6154)
- Added provisional 3.11 support with current beta build. (#6155)
- Requests got a makeover and we decided to paint it black. (#6095)
**Bugfixes**
- Fixed bug where setting `CURL_CA_BUNDLE` to an empty string would disable
cert verification. All Requests 2.x versions before 2.28.0 are affected. (#6074)
- Fixed urllib3 exception leak, wrapping `urllib3.exceptions.SSLError` with
`requests.exceptions.SSLError` for `content` and `iter_content`. (#6057)
- Fixed issue where invalid Windows registry entries caused proxy resolution
to raise an exception rather than ignoring the entry. (#6149)
- Fixed issue where entire payload could be included in the error message for
JSONDecodeError. (#6036)
2.27.1 (2022-01-05)
-------------------
**Bugfixes**
- Fixed parsing issue that resulted in the `auth` component being
dropped from proxy URLs. (#6028)
2.27.0 (2022-01-03)
-------------------
**Improvements**
- Officially added support for Python 3.10. (#5928)
- Added a `requests.exceptions.JSONDecodeError` to unify JSON exceptions between
Python 2 and 3. This gets raised in the `response.json()` method, and is
backwards compatible as it inherits from previously thrown exceptions.
Can be caught from `requests.exceptions.RequestException` as well. (#5856)
- Improved error text for misnamed `InvalidSchema` and `MissingSchema`
exceptions. This is a temporary fix until exceptions can be renamed
(Schema->Scheme). (#6017)
- Improved proxy parsing for proxy URLs missing a scheme. This will address
recent changes to `urlparse` in Python 3.9+. (#5917)
**Bugfixes**
- Fixed defect in `extract_zipped_paths` which could result in an infinite loop
for some paths. (#5851)
- Fixed handling for `AttributeError` when calculating length of files obtained
by `Tarfile.extractfile()`. (#5239)
- Fixed urllib3 exception leak, wrapping `urllib3.exceptions.InvalidHeader` with
`requests.exceptions.InvalidHeader`. (#5914)
- Fixed bug where two Host headers were sent for chunked requests. (#5391)
- Fixed regression in Requests 2.26.0 where `Proxy-Authorization` was
incorrectly stripped from all requests sent with `Session.send`. (#5924)
- Fixed performance regression in 2.26.0 for hosts with a large number of
proxies available in the environment. (#5924)
- Fixed idna exception leak, wrapping `UnicodeError` with
`requests.exceptions.InvalidURL` for URLs with a leading dot (.) in the
domain. (#5414)
**Deprecations**
- Requests support for Python 2.7 and 3.6 will be ending in 2022. While we
don't have exact dates, Requests 2.27.x is likely to be the last release
series providing support.
2.26.0 (2021-07-13)
-------------------
**Improvements**
- Requests now supports Brotli compression, if either the `brotli` or
`brotlicffi` package is installed. (#5783)
- `Session.send` now correctly resolves proxy configurations from both
the Session and Request. Behavior now matches `Session.request`. (#5681)
**Bugfixes**
- Fixed a race condition in zip extraction when using Requests in parallel
from zip archive. (#5707)
**Dependencies**
- Instead of `chardet`, use the MIT-licensed `charset_normalizer` for Python3
to remove license ambiguity for projects bundling requests. If `chardet`
is already installed on your machine it will be used instead of `charset_normalizer`
to keep backwards compatibility. (#5797)
You can also install `chardet` while installing requests by
specifying `[use_chardet_on_py3]` extra as follows:
```shell
pip install "requests[use_chardet_on_py3]"
```
Python2 still depends upon the `chardet` module.
- Requests now supports `idna` 3.x on Python 3. `idna` 2.x will continue to
be used on Python 2 installations. (#5711)
**Deprecations**
- The `requests[security]` extra has been converted to a no-op install.
PyOpenSSL is no longer the recommended secure option for Requests. (#5867)
- Requests has officially dropped support for Python 3.5. (#5867)
2.25.1 (2020-12-16)
-------------------
**Bugfixes**
- Requests now treats `application/json` as `utf8` by default. Resolving
inconsistencies between `r.text` and `r.json` output. (#5673)
**Dependencies**
- Requests now supports chardet v4.x.
2.25.0 (2020-11-11)
-------------------
**Improvements**
- Added support for NETRC environment variable. (#5643)
**Dependencies**
- Requests now supports urllib3 v1.26.
**Deprecations**
- Requests v2.25.x will be the last release series with support for Python 3.5.
- The `requests[security]` extra is officially deprecated and will be removed
in Requests v2.26.0.
2.24.0 (2020-06-17)
-------------------
**Improvements**
- pyOpenSSL TLS implementation is now only used if Python
either doesn't have an `ssl` module or doesn't support
SNI. Previously pyOpenSSL was unconditionally used if available.
This applies even if pyOpenSSL is installed via the
`requests[security]` extra (#5443)
- Redirect resolution should now only occur when
`allow_redirects` is True. (#5492)
- No longer perform unnecessary Content-Length calculation for
requests that won't use it. (#5496)
2.23.0 (2020-02-19)
-------------------
**Improvements**
- Remove defunct reference to `prefetch` in Session `__attrs__` (#5110)
**Bugfixes**
- Requests no longer outputs password in basic auth usage warning. (#5099)
**Dependencies**
- Pinning for `chardet` and `idna` now uses major version instead of minor.
This hopefully reduces the need for releases every time a dependency is updated.
2.22.0 (2019-05-15)
-------------------
**Dependencies**
- Requests now supports urllib3 v1.25.2.
(note: 1.25.0 and 1.25.1 are incompatible)
**Deprecations**
- Requests has officially stopped support for Python 3.4.
2.21.0 (2018-12-10)
-------------------
**Dependencies**
- Requests now supports idna v2.8.
2.20.1 (2018-11-08)
-------------------
**Bugfixes**
- Fixed bug with unintended Authorization header stripping for
redirects using default ports (http/80, https/443).
2.20.0 (2018-10-18)
-------------------
**Bugfixes**
- Content-Type header parsing is now case-insensitive (e.g.
  charset=utf8 vs. Charset=utf8).
- Fixed exception leak where certain redirect urls would raise
uncaught urllib3 exceptions.
- Requests removes Authorization header from requests redirected
from https to http on the same hostname. (CVE-2018-18074)
- `should_bypass_proxies` now handles URIs without hostnames (e.g.
files).
**Dependencies**
- Requests now supports urllib3 v1.24.
**Deprecations**
- Requests has officially stopped support for Python 2.6.
2.19.1 (2018-06-14)
-------------------
**Bugfixes**
- Fixed issue where status\_codes.py's `init` function failed trying
to append to a `__doc__` value of `None`.
2.19.0 (2018-06-12)
-------------------
**Improvements**
- Warn user about possible slowdown when using cryptography version
< 1.3.4
- Check for invalid host in proxy URL, before forwarding request to
adapter.
- Fragments are now properly maintained across redirects. (RFC7231
7.1.2)
- Removed use of cgi module to expedite library load time.
- Added support for SHA-256 and SHA-512 digest auth algorithms.
- Minor performance improvement to `Request.content`.
- Migrate to using collections.abc for 3.7 compatibility.
**Bugfixes**
- Parsing empty `Link` headers with `parse_header_links()` no longer
  returns one bogus entry.
- Fixed issue where loading the default certificate bundle from a zip
archive would raise an `IOError`.
- Fixed issue with unexpected `ImportError` on Windows systems which do
  not support the `winreg` module.
- DNS resolution in proxy bypass no longer includes the username and
password in the request. This also fixes the issue of DNS queries
failing on macOS.
- Properly normalize adapter prefixes for url comparison.
- Passing `None` as a file pointer to the `files` param no longer
raises an exception.
- Calling `copy` on a `RequestsCookieJar` will now preserve the cookie
policy correctly.
**Dependencies**
- We now support idna v2.7.
- We now support urllib3 v1.23.
2.18.4 (2017-08-15)
-------------------
**Improvements**
- Error messages for invalid headers now include the header name for
easier debugging
**Dependencies**
- We now support idna v2.6.
2.18.3 (2017-08-02)
-------------------
**Improvements**
- Running `$ python -m requests.help` now includes the installed
version of idna.
**Bugfixes**
- Fixed issue where Requests would raise `ConnectionError` instead of
`SSLError` when encountering SSL problems when using urllib3 v1.22.
2.18.2 (2017-07-25)
-------------------
**Bugfixes**
- `requests.help` no longer fails on Python 2.6 due to the absence of
`ssl.OPENSSL_VERSION_NUMBER`.
**Dependencies**
- We now support urllib3 v1.22.
2.18.1 (2017-06-14)
-------------------
**Bugfixes**
- Fix an error in the packaging whereby the `*.whl` contained
incorrect data that regressed the fix in v2.17.3.
2.18.0 (2017-06-14)
-------------------
**Improvements**
- `Response` is now a context manager, so can be used directly in a
`with` statement without first having to be wrapped by
`contextlib.closing()`.
**Bugfixes**
- Resolve installation failure if multiprocessing is not available
- Resolve tests crash if multiprocessing is not able to determine the
number of CPU cores
- Resolve error swallowing in utils set\_environ generator
2.17.3 (2017-05-29)
-------------------
**Improvements**
- Improved `packages` namespace identity support, for monkeypatching
libraries.
2.17.2 (2017-05-29)
-------------------
**Improvements**
- Improved `packages` namespace identity support, for monkeypatching
libraries.
2.17.1 (2017-05-29)
-------------------
**Improvements**
- Improved `packages` namespace identity support, for monkeypatching
libraries.
2.17.0 (2017-05-29)
-------------------
**Improvements**
- Removal of the 301 redirect cache. This improves thread-safety.
2.16.5 (2017-05-28)
-------------------
- Improvements to `$ python -m requests.help`.
2.16.4 (2017-05-27)
-------------------
- Introduction of the `$ python -m requests.help` command, for
debugging with maintainers!
2.16.3 (2017-05-27)
-------------------
- Further restored the `requests.packages` namespace for compatibility
reasons.
2.16.2 (2017-05-27)
-------------------
- Further restored the `requests.packages` namespace for compatibility
reasons.
No code modification (noted below) should be necessary any longer.
2.16.1 (2017-05-27)
-------------------
- Restored the `requests.packages` namespace for compatibility
reasons.
- Bugfix for `urllib3` version parsing.
**Note**: code that was written to import against the
`requests.packages` namespace previously will have to import code that
rests at this module-level now.
For example:
from requests.packages.urllib3.poolmanager import PoolManager
Will need to be re-written to be:
from requests.packages import urllib3
urllib3.poolmanager.PoolManager
Or, even better:
from urllib3.poolmanager import PoolManager
2.16.0 (2017-05-26)
-------------------
- Unvendor ALL the things!
2.15.1 (2017-05-26)
-------------------
- Everyone makes mistakes.
2.15.0 (2017-05-26)
-------------------
**Improvements**
- Introduction of the `Response.next` property, for getting the next
  `PreparedRequest` from a redirect chain (when
  `allow_redirects=False`).
- Internal refactoring of `__version__` module.
**Bugfixes**
- Restored once-optional parameter for
`requests.utils.get_environ_proxies()`.
2.14.2 (2017-05-10)
-------------------
**Bugfixes**
- Changed a less-than to an equal-to and an or in the dependency
markers to widen compatibility with older setuptools releases.
2.14.1 (2017-05-09)
-------------------
**Bugfixes**
- Changed the dependency markers to widen compatibility with older pip
releases.
2.14.0 (2017-05-09)
-------------------
**Improvements**
- It is now possible to pass `no_proxy` as a key to the `proxies`
dictionary to provide handling similar to the `NO_PROXY` environment
variable.
- When users provide invalid paths to certificate bundle files or
directories Requests now raises `IOError`, rather than failing at
the time of the HTTPS request with a fairly inscrutable certificate
validation error.
- The behavior of `SessionRedirectMixin` was slightly altered.
`resolve_redirects` will now detect a redirect by calling
`get_redirect_target(response)` instead of directly querying
`Response.is_redirect` and `Response.headers['location']`. Advanced
users will be able to process malformed redirects more easily.
- Changed the internal calculation of elapsed request time to have
higher resolution on Windows.
- Added `win_inet_pton` as conditional dependency for the `[socks]`
extra on Windows with Python 2.7.
- Changed the proxy bypass implementation on Windows: the proxy bypass
  check doesn't use forward and reverse DNS requests anymore.
- URLs with schemes that begin with `http` but are not `http` or
`https` no longer have their host parts forced to lowercase.
**Bugfixes**
- Much improved handling of non-ASCII `Location` header values in
redirects. Fewer `UnicodeDecodeErrors` are encountered on Python 2,
and Python 3 now correctly understands that Latin-1 is unlikely to
be the correct encoding.
- If an attempt to `seek` file to find out its length fails, we now
appropriately handle that by aborting our content-length
calculations.
- Restricted `HTTPDigestAuth` to only respond to auth challenges made
on 4XX responses, rather than to all auth challenges.
- Fixed some code that was firing `DeprecationWarning` on Python 3.6.
- The dismayed person emoticon (`/o\\`) no longer has a big head. I'm
sure this is what you were all worrying about most.
**Miscellaneous**
- Updated bundled urllib3 to v1.21.1.
- Updated bundled chardet to v3.0.2.
- Updated bundled idna to v2.5.
- Updated bundled certifi to 2017.4.17.
2.13.0 (2017-01-24)
-------------------
**Features**
- Only load the `idna` library when we've determined we need it. This
will save some memory for users.
**Miscellaneous**
- Updated bundled urllib3 to 1.20.
- Updated bundled idna to 2.2.
2.12.5 (2017-01-18)
-------------------
**Bugfixes**
- Fixed an issue with JSON encoding detection, specifically detecting
big-endian UTF-32 with BOM.
2.12.4 (2016-12-14)
-------------------
**Bugfixes**
- Fixed regression from 2.12.2 where non-string types were rejected in
the basic auth parameters. While support for this behaviour has been
re-added, the behaviour is deprecated and will be removed in the
future.
2.12.3 (2016-12-01)
-------------------
**Bugfixes**
- Fixed regression from v2.12.1 for URLs with schemes that begin with
"http". These URLs have historically been processed as though they
were HTTP-schemed URLs, and so have had parameters added. This was
removed in v2.12.2 in an overzealous attempt to resolve problems
with IDNA-encoding those URLs. This change was reverted: the other
fixes for IDNA-encoding have been judged to be sufficient to return
to the behaviour Requests had before v2.12.0.
2.12.2 (2016-11-30)
-------------------
**Bugfixes**
- Fixed several issues with IDNA-encoding URLs that are technically
invalid but which are widely accepted. Requests will now attempt to
IDNA-encode a URL if it can but, if it fails, and the host contains
only ASCII characters, it will be passed through optimistically.
This will allow users to opt-in to using IDNA2003 themselves if they
want to, and will also allow technically invalid but still common
hostnames.
- Fixed an issue where URLs with leading whitespace would raise
`InvalidSchema` errors.
- Fixed an issue where some URLs without the HTTP or HTTPS schemes
would still have HTTP URL preparation applied to them.
- Fixed an issue where Unicode strings could not be used in basic
auth.
- Fixed an issue encountered by some Requests plugins where
constructing a Response object would cause `Response.content` to
raise an `AttributeError`.
2.12.1 (2016-11-16)
-------------------
**Bugfixes**
- Updated setuptools 'security' extra for the new PyOpenSSL backend in
urllib3.
**Miscellaneous**
- Updated bundled urllib3 to 1.19.1.
2.12.0 (2016-11-15)
-------------------
**Improvements**
- Updated support for internationalized domain names from IDNA2003 to
IDNA2008. This updated support is required for several forms of IDNs
and is mandatory for .de domains.
- Much improved heuristics for guessing content lengths: Requests will
no longer read an entire `StringIO` into memory.
- Much improved logic for recalculating `Content-Length` headers for
`PreparedRequest` objects.
- Improved tolerance for file-like objects that have no `tell` method
but do have a `seek` method.
- Anything that is a subclass of `Mapping` is now treated like a
dictionary by the `data=` keyword argument.
- Requests now tolerates empty passwords in proxy credentials, rather
than stripping the credentials.
- If a request is made with a file-like object as the body and that
request is redirected with a 307 or 308 status code, Requests will
now attempt to rewind the body object so it can be replayed.
**Bugfixes**
- When calling `response.close`, the call to `close` will be
propagated through to non-urllib3 backends.
- Fixed issue where the `ALL_PROXY` environment variable would be
preferred over scheme-specific variables like `HTTP_PROXY`.
- Fixed issue where non-UTF8 reason phrases got severely mangled by
falling back to decoding using ISO 8859-1 instead.
- Fixed a bug where Requests would not correctly correlate cookies set
when using custom Host headers if those Host headers did not use the
native string type for the platform.
**Miscellaneous**
- Updated bundled urllib3 to 1.19.
- Updated bundled certifi certs to 2016.09.26.
2.11.1 (2016-08-17)
-------------------
**Bugfixes**
- Fixed a bug when using `iter_content` with `decode_unicode=True` for
streamed bodies would raise `AttributeError`. This bug was
introduced in 2.11.
- Strip Content-Type and Transfer-Encoding headers from the header
block when following a redirect that transforms the verb from
POST/PUT to GET.
2.11.0 (2016-08-08)
-------------------
**Improvements**
- Added support for the `ALL_PROXY` environment variable.
- Reject header values that contain leading whitespace or newline
characters to reduce risk of header smuggling.
**Bugfixes**
- Fixed occasional `TypeError` when attempting to decode a JSON
response that occurred in an error case. Now correctly returns a
`ValueError`.
- Requests would incorrectly ignore a non-CIDR IP address in the
`NO_PROXY` environment variables: Requests now treats it as a
specific IP.
- Fixed a bug when sending JSON data that could cause us to encounter
obscure OpenSSL errors in certain network conditions (yes, really).
- Added type checks to ensure that `iter_content` only accepts
integers and `None` for chunk sizes.
- Fixed issue where responses whose body had not been fully consumed
would have the underlying connection closed but not returned to the
connection pool, which could cause Requests to hang in situations
where the `HTTPAdapter` had been configured to use a blocking
connection pool.
**Miscellaneous**
- Updated bundled urllib3 to 1.16.
- Some previous releases accidentally accepted non-strings as
acceptable header values. This release does not.
2.10.0 (2016-04-29)
-------------------
**New Features**
- SOCKS Proxy Support! (requires PySocks;
`$ pip install requests[socks]`)
**Miscellaneous**
- Updated bundled urllib3 to 1.15.1.
2.9.2 (2016-04-29)
------------------
**Improvements**
- Change built-in CaseInsensitiveDict (used for headers) to use
OrderedDict as its underlying datastore.
**Bugfixes**
- Don't use redirect\_cache if allow\_redirects=False
- When passed objects that throw exceptions from `tell()`, send them
via chunked transfer encoding instead of failing.
- Raise a ProxyError for proxy related connection issues.
2.9.1 (2015-12-21)
------------------
**Bugfixes**
- Resolve regression introduced in 2.9.0 that made it impossible to
send binary strings as bodies in Python 3.
- Fixed errors when calculating cookie expiration dates in certain
locales.
**Miscellaneous**
- Updated bundled urllib3 to 1.13.1.
2.9.0 (2015-12-15)
------------------
**Minor Improvements** (Backwards compatible)
- The `verify` keyword argument now supports being passed a path to a
directory of CA certificates, not just a single-file bundle.
- Warnings are now emitted when sending files opened in text mode.
- Added the 511 Network Authentication Required status code to the
status code registry.
**Bugfixes**
- For file-like objects that are not sought to the very beginning, we
now send the content length for the number of bytes we will actually
read, rather than the total size of the file, allowing partial file
uploads.
- When uploading file-like objects, if they are empty or have no
obvious content length we set `Transfer-Encoding: chunked` rather
than `Content-Length: 0`.
- We correctly receive the response in buffered mode when uploading
chunked bodies.
- We now handle being passed a query string as a bytestring on Python
3, by decoding it as UTF-8.
- Sessions are now closed in all cases (exceptional and not) when
using the functional API rather than leaking and waiting for the
garbage collector to clean them up.
- Correctly handle digest auth headers with a malformed `qop`
directive that contains no token, by treating it the same as if no
`qop` directive was provided at all.
- Minor performance improvements when removing specific cookies by
name.
**Miscellaneous**
- Updated urllib3 to 1.13.
2.8.1 (2015-10-13)
------------------
**Bugfixes**
- Update certificate bundle to match `certifi` 2015.9.6.2's weak
certificate bundle.
- Fix a bug in 2.8.0 where requests would raise `ConnectTimeout`
instead of `ConnectionError`
- When using the PreparedRequest flow, requests will now correctly
respect the `json` parameter. Broken in 2.8.0.
- When using the PreparedRequest flow, requests will now correctly
handle a Unicode-string method name on Python 2. Broken in 2.8.0.
2.8.0 (2015-10-05)
------------------
**Minor Improvements** (Backwards Compatible)
- Requests now supports per-host proxies. This allows the `proxies`
dictionary to have entries of the form
`{'<scheme>://<hostname>': '<proxy>'}`. Host-specific proxies will
be used in preference to the previously-supported scheme-specific
ones, but the previous syntax will continue to work.
- `Response.raise_for_status` now prints the URL that failed as part
of the exception message.
- `requests.utils.get_netrc_auth` now takes a `raise_errors` kwarg,
  defaulting to `False`. When `True`, errors parsing `.netrc` files
  cause exceptions to be thrown.
- Change to bundled projects import logic to make it easier to
unbundle requests downstream.
- Changed the default User-Agent string to avoid leaking data on
Linux: now contains only the requests version.
**Bugfixes**
- The `json` parameter to `post()` and friends will now only be used
if neither `data` nor `files` are present, consistent with the
documentation.
- We now ignore empty fields in the `NO_PROXY` environment variable.
- Fixed problem where `httplib.BadStatusLine` would get raised if
combining `stream=True` with `contextlib.closing`.
- Prevented bugs where we would attempt to return the same connection
back to the connection pool twice when sending a Chunked body.
- Miscellaneous minor internal changes.
- Digest Auth support is now thread safe.
**Updates**
- Updated urllib3 to 1.12.
2.7.0 (2015-05-03)
------------------
This is the first release that follows our new release process. For
more, see [our
documentation](https://requests.readthedocs.io/en/latest/community/release-process/).
**Bugfixes**
- Updated urllib3 to 1.10.4, resolving several bugs involving chunked
transfer encoding and response framing.
2.6.2 (2015-04-23)
------------------
**Bugfixes**
- Fix regression where compressed data that was sent as chunked data
was not properly decompressed. (\#2561)
2.6.1 (2015-04-22)
------------------
**Bugfixes**
- Remove VendorAlias import machinery introduced in v2.5.2.
- Simplify the PreparedRequest.prepare API: We no longer require the
user to pass an empty list to the hooks keyword argument. (c.f.
\#2552)
- Resolve redirects now receives and forwards all of the original
arguments to the adapter. (\#2503)
- Handle UnicodeDecodeErrors when trying to deal with a unicode URL
that cannot be encoded in ASCII. (\#2540)
- Populate the parsed path of the URI field when performing Digest
Authentication. (\#2426)
- Copy a PreparedRequest's CookieJar more reliably when it is not an
instance of RequestsCookieJar. (\#2527)
2.6.0 (2015-03-14)
------------------
**Bugfixes**
- CVE-2015-2296: Fix handling of cookies on redirect. Previously a
cookie without a host value set would use the hostname for the
redirected URL exposing requests users to session fixation attacks
and potentially cookie stealing. This was disclosed privately by
Matthew Daley of [BugFuzz](https://bugfuzz.com). This affects all
versions of requests from v2.1.0 to v2.5.3 (inclusive on both ends).
- Fix error when requests is an `install_requires` dependency and
`python setup.py test` is run. (\#2462)
- Fix error when urllib3 is unbundled and requests continues to use
the vendored import location.
- Include fixes to `urllib3`'s header handling.
- Requests' handling of unvendored dependencies is now more
restrictive.
**Features and Improvements**
- Support bytearrays when passed as parameters in the `files`
argument. (\#2468)
- Avoid data duplication when creating a request with `str`, `bytes`,
or `bytearray` input to the `files` argument.
2.5.3 (2015-02-24)
------------------
**Bugfixes**
- Revert changes to our vendored certificate bundle. For more context
see (\#2455, \#2456, and <https://bugs.python.org/issue23476>)
2.5.2 (2015-02-23)
------------------
**Features and Improvements**
- Add sha256 fingerprint support.
([shazow/urllib3\#540](https://github.com/shazow/urllib3/pull/540))
- Improve the performance of headers.
([shazow/urllib3\#544](https://github.com/shazow/urllib3/pull/544))
**Bugfixes**
- Copy pip's import machinery. When downstream redistributors remove
requests.packages.urllib3 the import machinery will continue to let
those same symbols work. Example usage in requests' documentation
and 3rd-party libraries relying on the vendored copies of urllib3
will work without having to fallback to the system urllib3.
- Attempt to quote parts of the URL on redirect if unquoting and then
quoting fails. (\#2356)
- Fix filename type check for multipart form-data uploads. (\#2411)
- Properly handle the case where a server issuing digest
authentication challenges provides both auth and auth-int
qop-values. (\#2408)
- Fix a socket leak.
([shazow/urllib3\#549](https://github.com/shazow/urllib3/pull/549))
- Fix multiple `Set-Cookie` headers properly.
([shazow/urllib3\#534](https://github.com/shazow/urllib3/pull/534))
- Disable the built-in hostname verification.
([shazow/urllib3\#526](https://github.com/shazow/urllib3/pull/526))
- Fix the behaviour of decoding an exhausted stream.
([shazow/urllib3\#535](https://github.com/shazow/urllib3/pull/535))
**Security**
- Pulled in an updated `cacert.pem`.
- Drop RC4 from the default cipher list.
([shazow/urllib3\#551](https://github.com/shazow/urllib3/pull/551))
2.5.1 (2014-12-23)
------------------
**Behavioural Changes**
- Only catch HTTPErrors in raise\_for\_status (\#2382)
**Bugfixes**
- Handle LocationParseError from urllib3 (\#2344)
- Handle file-like object filenames that are not strings (\#2379)
- Unbreak HTTPDigestAuth handler. Allow new nonces to be negotiated
(\#2389)
2.5.0 (2014-12-01)
------------------
**Improvements**
- Allow usage of urllib3's Retry object with HTTPAdapters (\#2216)
- The `iter_lines` method on a response now accepts a delimiter with
which to split the content (\#2295)
**Behavioural Changes**
- Add deprecation warnings to functions in requests.utils that will be
removed in 3.0 (\#2309)
- Sessions used by the functional API are always closed (\#2326)
- Restrict requests to HTTP/1.1 and HTTP/1.0 (stop accepting HTTP/0.9)
(\#2323)
**Bugfixes**
- Only parse the URL once (\#2353)
- Allow Content-Length header to always be overridden (\#2332)
- Properly handle files in HTTPDigestAuth (\#2333)
- Cap redirect\_cache size to prevent memory abuse (\#2299)
- Fix HTTPDigestAuth handling of redirects after authenticating
successfully (\#2253)
- Fix crash with custom method parameter to Session.request (\#2317)
- Fix how Link headers are parsed using the regular expression library
(\#2271)
**Documentation**
- Add more references for interlinking (\#2348)
- Update CSS for theme (\#2290)
- Update width of buttons and sidebar (\#2289)
- Replace references of Gittip with Gratipay (\#2282)
- Add link to changelog in sidebar (\#2273)
2.4.3 (2014-10-06)
------------------
**Bugfixes**
- Unicode URL improvements for Python 2.
- Re-order JSON param for backwards compat.
- Automatically defrag authentication schemes from host/pass URIs.
([\#2249](https://github.com/psf/requests/issues/2249))
2.4.2 (2014-10-05)
------------------
**Improvements**
- FINALLY! Add json parameter for uploads!
([\#2258](https://github.com/psf/requests/pull/2258))
- Support for bytestring URLs on Python 3.x
([\#2238](https://github.com/psf/requests/pull/2238))
**Bugfixes**
- Avoid getting stuck in a loop
([\#2244](https://github.com/psf/requests/pull/2244))
- Multiple calls to iter\* fail with unhelpful error.
([\#2240](https://github.com/psf/requests/issues/2240),
[\#2241](https://github.com/psf/requests/issues/2241))
**Documentation**
- Correct redirection introduction
([\#2245](https://github.com/psf/requests/pull/2245/))
- Added example of how to send multiple files in one request.
([\#2227](https://github.com/psf/requests/pull/2227/))
- Clarify how to pass a custom set of CAs
([\#2248](https://github.com/psf/requests/pull/2248/))
2.4.1 (2014-09-09)
------------------
- Now has a "security" package extras set,
`$ pip install requests[security]`
- Requests will now use Certifi if it is available.
- Capture and re-raise urllib3 ProtocolError
- Bugfix for responses that attempt to redirect to themselves forever
(wtf?).
2.4.0 (2014-08-29)
------------------
**Behavioral Changes**
- `Connection: keep-alive` header is now sent automatically.
**Improvements**
- Support for connect timeouts! Timeout now accepts a tuple (connect,
read) which is used to set individual connect and read timeouts.
- Allow copying of PreparedRequests without headers/cookies.
- Updated bundled urllib3 version.
- Refactored settings loading from environment -- new
Session.merge\_environment\_settings.
- Handle socket errors in iter\_content.
2.3.0 (2014-05-16)
------------------
**API Changes**
- New `Response` property `is_redirect`, which is true when the
library could have processed this response as a redirection (whether
or not it actually did).
- The `timeout` parameter now affects requests with both `stream=True`
and `stream=False` equally.
- The change in v2.0.0 to mandate explicit proxy schemes has been
reverted. Proxy schemes now default to `http://`.
- The `CaseInsensitiveDict` used for HTTP headers now behaves like a
normal dictionary when references as string or viewed in the
interpreter.
**Bugfixes**
- No longer expose Authorization or Proxy-Authorization headers on
redirect. Fix CVE-2014-1829 and CVE-2014-1830 respectively.
- Authorization is re-evaluated each redirect.
- On redirect, pass url as native strings.
- Fall-back to autodetected encoding for JSON when Unicode detection
fails.
- Headers set to `None` on the `Session` are now correctly not sent.
- Correctly honor `decode_unicode` even if it wasn't used earlier in
the same response.
- Stop advertising `compress` as a supported Content-Encoding.
- The `Response.history` parameter is now always a list.
- Many, many `urllib3` bugfixes.
2.2.1 (2014-01-23)
------------------
**Bugfixes**
- Fixes incorrect parsing of proxy credentials that contain a literal
or encoded '\#' character.
- Assorted urllib3 fixes.
2.2.0 (2014-01-09)
------------------
**API Changes**
- New exception: `ContentDecodingError`. Raised instead of `urllib3`
`DecodeError` exceptions.
**Bugfixes**
- Avoid many many exceptions from the buggy implementation of
`proxy_bypass` on OS X in Python 2.6.
- Avoid crashing when attempting to get authentication credentials
from \~/.netrc when running as a user without a home directory.
- Use the correct pool size for pools of connections to proxies.
- Fix iteration of `CookieJar` objects.
- Ensure that cookies are persisted over redirect.
- Switch back to using chardet, since it has merged with charade.
2.1.0 (2013-12-05)
------------------
- Updated CA Bundle, of course.
- Cookies set on individual Requests through a `Session` (e.g. via
`Session.get()`) are no longer persisted to the `Session`.
- Clean up connections when we hit problems during chunked upload,
rather than leaking them.
- Return connections to the pool when a chunked upload is successful,
rather than leaking it.
- Match the HTTPbis recommendation for HTTP 301 redirects.
- Prevent hanging when using streaming uploads and Digest Auth when a
401 is received.
- Values of headers set by Requests are now always the native string
type.
- Fix previously broken SNI support.
- Fix accessing HTTP proxies using proxy authentication.
- Unencode HTTP Basic usernames and passwords extracted from URLs.
- Support for IP address ranges for no\_proxy environment variable
- Parse headers correctly when users override the default `Host:`
header.
- Avoid munging the URL in case of case-sensitive servers.
- Looser URL handling for non-HTTP/HTTPS urls.
- Accept unicode methods in Python 2.6 and 2.7.
- More resilient cookie handling.
- Make `Response` objects pickleable.
- Actually added MD5-sess to Digest Auth instead of pretending to like
last time.
- Updated internal urllib3.
- Fixed @Lukasa's lack of taste.
2.0.1 (2013-10-24)
------------------
- Updated included CA Bundle with new mistrusts and automated process
for the future
- Added MD5-sess to Digest Auth
- Accept per-file headers in multipart file POST messages.
- Fixed: Don't send the full URL on CONNECT messages.
- Fixed: Correctly lowercase a redirect scheme.
- Fixed: Cookies not persisted when set via functional API.
- Fixed: Translate urllib3 ProxyError into a requests ProxyError
derived from ConnectionError.
- Updated internal urllib3 and chardet.
2.0.0 (2013-09-24)
------------------
**API Changes:**
- Keys in the Headers dictionary are now native strings on all Python
versions, i.e. bytestrings on Python 2, unicode on Python 3.
- Proxy URLs now *must* have an explicit scheme. A `MissingSchema`
exception will be raised if they don't.
- Timeouts now apply to read time if `Stream=False`.
- `RequestException` is now a subclass of `IOError`, not
`RuntimeError`.
- Added new method to `PreparedRequest` objects:
`PreparedRequest.copy()`.
- Added new method to `Session` objects: `Session.update_request()`.
This method updates a `Request` object with the data (e.g. cookies)
stored on the `Session`.
- Added new method to `Session` objects: `Session.prepare_request()`.
This method updates and prepares a `Request` object, and returns the
corresponding `PreparedRequest` object.
- Added new method to `HTTPAdapter` objects:
`HTTPAdapter.proxy_headers()`. This should not be called directly,
but improves the subclass interface.
- `httplib.IncompleteRead` exceptions caused by incorrect chunked
encoding will now raise a Requests `ChunkedEncodingError` instead.
- Invalid percent-escape sequences now cause a Requests `InvalidURL`
exception to be raised.
- HTTP 208 no longer uses reason phrase `"im_used"`. Correctly uses
`"already_reported"`.
- HTTP 226 reason added (`"im_used"`).
**Bugfixes:**
- Vastly improved proxy support, including the CONNECT verb. Special
thanks to the many contributors who worked towards this improvement.
- Cookies are now properly managed when 401 authentication responses
are received.
- Chunked encoding fixes.
- Support for mixed case schemes.
- Better handling of streaming downloads.
- Retrieve environment proxies from more locations.
- Minor cookies fixes.
- Improved redirect behaviour.
- Improved streaming behaviour, particularly for compressed data.
- Miscellaneous small Python 3 text encoding bugs.
- `.netrc` no longer overrides explicit auth.
- Cookies set by hooks are now correctly persisted on Sessions.
- Fix problem with cookies that specify port numbers in their host
field.
- `BytesIO` can be used to perform streaming uploads.
- More generous parsing of the `no_proxy` environment variable.
- Non-string objects can be passed in data values alongside files.
1.2.3 (2013-05-25)
------------------
- Simple packaging fix
1.2.2 (2013-05-23)
------------------
- Simple packaging fix
1.2.1 (2013-05-20)
------------------
- 301 and 302 redirects now change the verb to GET for all verbs, not
just POST, improving browser compatibility.
- Python 3.3.2 compatibility
- Always percent-encode location headers
- Fix connection adapter matching to be most-specific first
- new argument to the default connection adapter for passing a block
argument
- prevent a KeyError when there's no link headers
1.2.0 (2013-03-31)
------------------
- Fixed cookies on sessions and on requests
- Significantly change how hooks are dispatched - hooks now receive
all the arguments specified by the user when making a request so
hooks can make a secondary request with the same parameters. This is
especially necessary for authentication handler authors
- certifi support was removed
- Fixed bug where using OAuth 1 with body `signature_type` sent no
data
- Major proxy work thanks to @Lukasa including parsing of proxy
authentication from the proxy url
- Fix DigestAuth handling too many 401s
- Update vendored urllib3 to include SSL bug fixes
- Allow keyword arguments to be passed to `json.loads()` via the
`Response.json()` method
- Don't send `Content-Length` header by default on `GET` or `HEAD`
requests
- Add `elapsed` attribute to `Response` objects to time how long a
request took.
- Fix `RequestsCookieJar`
- Sessions and Adapters are now picklable, i.e., can be used with the
multiprocessing library
- Update charade to version 1.0.3
The change in how hooks are dispatched will likely cause a great deal of
issues.
1.1.0 (2013-01-10)
------------------
- CHUNKED REQUESTS
- Support for iterable response bodies
- Assume servers persist redirect params
- Allow explicit content types to be specified for file data
- Make merge\_kwargs case-insensitive when looking up keys
1.0.3 (2012-12-18)
------------------
- Fix file upload encoding bug
- Fix cookie behavior
1.0.2 (2012-12-17)
------------------
- Proxy fix for HTTPAdapter.
1.0.1 (2012-12-17)
------------------
- Cert verification exception bug.
- Proxy fix for HTTPAdapter.
1.0.0 (2012-12-17)
------------------
- Massive Refactor and Simplification
- Switch to Apache 2.0 license
- Swappable Connection Adapters
- Mountable Connection Adapters
- Mutable ProcessedRequest chain
- /s/prefetch/stream
- Removal of all configuration
- Standard library logging
- Make Response.json() callable, not property.
- Usage of new charade project, which provides python 2 and 3
simultaneous chardet.
- Removal of all hooks except 'response'
- Removal of all authentication helpers (OAuth, Kerberos)
This is not a backwards compatible change.
0.14.2 (2012-10-27)
-------------------
- Improved mime-compatible JSON handling
- Proxy fixes
- Path hack fixes
- Case-Insensitive Content-Encoding headers
- Support for CJK parameters in form posts
0.14.1 (2012-10-01)
-------------------
- Python 3.3 Compatibility
- Simplify default accept-encoding
- Bugfixes
0.14.0 (2012-09-02)
-------------------
- No more iter\_content errors if already downloaded.
0.13.9 (2012-08-25)
-------------------
- Fix for OAuth + POSTs
- Remove exception eating from dispatch\_hook
- General bugfixes
0.13.8 (2012-08-21)
-------------------
- Incredible Link header support :)
0.13.7 (2012-08-19)
-------------------
- Support for (key, value) lists everywhere.
- Digest Authentication improvements.
- Ensure proxy exclusions work properly.
- Clearer UnicodeError exceptions.
- Automatic casting of URLs to strings (fURL and such)
- Bugfixes.
0.13.6 (2012-08-06)
-------------------
- Long awaited fix for hanging connections!
0.13.5 (2012-07-27)
-------------------
- Packaging fix
0.13.4 (2012-07-27)
-------------------
- GSSAPI/Kerberos authentication!
- App Engine 2.7 Fixes!
- Fix leaking connections (from urllib3 update)
- OAuthlib path hack fix
- OAuthlib URL parameters fix.
0.13.3 (2012-07-12)
-------------------
- Use simplejson if available.
- Do not hide SSLErrors behind Timeouts.
- Fixed param handling with urls containing fragments.
- Significantly improved information in User Agent.
- client certificates are ignored when verify=False
0.13.2 (2012-06-28)
-------------------
- Zero dependencies (once again)!
- New: Response.reason
- Sign querystring parameters in OAuth 1.0
- Client certificates no longer ignored when verify=False
- Add openSUSE certificate support
0.13.1 (2012-06-07)
-------------------
- Allow passing a file or file-like object as data.
- Allow hooks to return responses that indicate errors.
- Fix Response.text and Response.json for body-less responses.
0.13.0 (2012-05-29)
-------------------
- Removal of Requests.async in favor of
[grequests](https://github.com/kennethreitz/grequests)
- Allow disabling of cookie persistence.
- New implementation of safe\_mode
- cookies.get now supports default argument
- Session cookies not saved when Session.request is called with
return\_response=False
- Env: no\_proxy support.
- RequestsCookieJar improvements.
- Various bug fixes.
0.12.1 (2012-05-08)
-------------------
- New `Response.json` property.
- Ability to add string file uploads.
- Fix out-of-range issue with iter\_lines.
- Fix iter\_content default size.
- Fix POST redirects containing files.
0.12.0 (2012-05-02)
-------------------
- EXPERIMENTAL OAUTH SUPPORT!
- Proper CookieJar-backed cookies interface with awesome dict-like
interface.
- Speed fix for non-iterated content chunks.
- Move `pre_request` to a more usable place.
- New `pre_send` hook.
- Lazily encode data, params, files.
- Load system Certificate Bundle if `certifi` isn't available.
- Cleanups, fixes.
0.11.2 (2012-04-22)
-------------------
- Attempt to use the OS's certificate bundle if `certifi` isn't
available.
- Infinite digest auth redirect fix.
- Multi-part file upload improvements.
- Fix decoding of invalid %encodings in URLs.
- If there is no content in a response don't throw an error the second
time that content is attempted to be read.
- Upload data on redirects.
0.11.1 (2012-03-30)
-------------------
- POST redirects now break RFC to do what browsers do: Follow up with
a GET.
- New `strict_mode` configuration to disable new redirect behavior.
0.11.0 (2012-03-14)
-------------------
- Private SSL Certificate support
- Remove select.poll from Gevent monkeypatching
- Remove redundant generator for chunked transfer encoding
- Fix: Response.ok raises Timeout Exception in safe\_mode
0.10.8 (2012-03-09)
-------------------
- Generate chunked ValueError fix
- Proxy configuration by environment variables
- Simplification of iter\_lines.
- New trust\_env configuration for disabling system/environment hints.
- Suppress cookie errors.
0.10.7 (2012-03-07)
-------------------
- encode\_uri = False
0.10.6 (2012-02-25)
-------------------
- Allow '=' in cookies.
0.10.5 (2012-02-25)
-------------------
- Response body with 0 content-length fix.
- New async.imap.
- Don't fail on netrc.
0.10.4 (2012-02-20)
-------------------
- Honor netrc.
0.10.3 (2012-02-20)
-------------------
- HEAD requests don't follow redirects anymore.
- raise\_for\_status() doesn't raise for 3xx anymore.
- Make Session objects picklable.
- ValueError for invalid schema URLs.
0.10.2 (2012-01-15)
-------------------
- Vastly improved URL quoting.
- Additional allowed cookie key values.
- Attempted fix for "Too many open files" Error
- Replace unicode errors on first pass, no need for second pass.
- Append '/' to bare-domain urls before query insertion.
- Exceptions now inherit from RuntimeError.
- Binary uploads + auth fix.
- Bugfixes.
0.10.1 (2012-01-23)
-------------------
- PYTHON 3 SUPPORT!
- Dropped 2.5 Support. (*Backwards Incompatible*)
0.10.0 (2012-01-21)
-------------------
- `Response.content` is now bytes-only. (*Backwards Incompatible*)
- New `Response.text` is unicode-only.
- If no `Response.encoding` is specified and `chardet` is available,
`Response.text` will guess an encoding.
- Default to ISO-8859-1 (Western) encoding for "text" subtypes.
- Removal of decode\_unicode. (*Backwards Incompatible*)
- New multiple-hooks system.
- New `Response.register_hook` for registering hooks within the
pipeline.
- `Response.url` is now Unicode.
0.9.3 (2012-01-18)
------------------
- SSL verify=False bugfix (apparent on windows machines).
0.9.2 (2012-01-18)
------------------
- Asynchronous async.send method.
- Support for proper chunk streams with boundaries.
- session argument for Session classes.
- Print entire hook tracebacks, not just exception instance.
- Fix response.iter\_lines from pending next line.
- Fix bug in HTTP-digest auth w/ URI having query strings.
- Fix in Event Hooks section.
- Urllib3 update.
0.9.1 (2012-01-06)
------------------
- danger\_mode for automatic Response.raise\_for\_status()
- Response.iter\_lines refactor
0.9.0 (2011-12-28)
------------------
- verify ssl is default.
0.8.9 (2011-12-28)
------------------
- Packaging fix.
0.8.8 (2011-12-28)
------------------
- SSL CERT VERIFICATION!
- Release of Certifi: Mozilla's cert list.
- New 'verify' argument for SSL requests.
- Urllib3 update.
0.8.7 (2011-12-24)
------------------
- iter\_lines last-line truncation fix
- Force safe\_mode for async requests
- Handle safe\_mode exceptions more consistently
- Fix iteration on null responses in safe\_mode
0.8.6 (2011-12-18)
------------------
- Socket timeout fixes.
- Proxy Authorization support.
0.8.5 (2011-12-14)
------------------
- Response.iter\_lines!
0.8.4 (2011-12-11)
------------------
- Prefetch bugfix.
- Added license to installed version.
0.8.3 (2011-11-27)
------------------
- Converted auth system to use simpler callable objects.
- New session parameter to API methods.
- Display full URL while logging.
0.8.2 (2011-11-19)
------------------
- New Unicode decoding system, based on over-ridable
Response.encoding.
- Proper URL slash-quote handling.
- Cookies with `[`, `]`, and `_` allowed.
0.8.1 (2011-11-15)
------------------
- URL Request path fix
- Proxy fix.
- Timeouts fix.
0.8.0 (2011-11-13)
------------------
- Keep-alive support!
- Complete removal of Urllib2
- Complete removal of Poster
- Complete removal of CookieJars
- New ConnectionError raising
- Safe\_mode for error catching
- prefetch parameter for request methods
- OPTION method
- Async pool size throttling
- File uploads send real names
- Vendored in urllib3
0.7.6 (2011-11-07)
------------------
- Digest authentication bugfix (attach query data to path)
0.7.5 (2011-11-04)
------------------
- Response.content = None if there was an invalid response.
- Redirection auth handling.
0.7.4 (2011-10-26)
------------------
- Session Hooks fix.
0.7.3 (2011-10-23)
------------------
- Digest Auth fix.
0.7.2 (2011-10-23)
------------------
- PATCH Fix.
0.7.1 (2011-10-23)
------------------
- Move away from urllib2 authentication handling.
- Fully Remove AuthManager, AuthObject, &c.
- New tuple-based auth system with handler callbacks.
0.7.0 (2011-10-22)
------------------
- Sessions are now the primary interface.
- Deprecated InvalidMethodException.
- PATCH fix.
- New config system (no more global settings).
0.6.6 (2011-10-19)
------------------
- Session parameter bugfix (params merging).
0.6.5 (2011-10-18)
------------------
- Offline (fast) test suite.
- Session dictionary argument merging.
0.6.4 (2011-10-13)
------------------
- Automatic decoding of unicode, based on HTTP Headers.
- New `decode_unicode` setting.
- Removal of `r.read/close` methods.
- New `r.raw` interface for advanced response usage.\*
- Automatic expansion of parameterized headers.
0.6.3 (2011-10-13)
------------------
- Beautiful `requests.async` module, for making async requests w/
gevent.
0.6.2 (2011-10-09)
------------------
- GET/HEAD obeys allow\_redirects=False.
0.6.1 (2011-08-20)
------------------
- Enhanced status codes experience `\o/`
- Set a maximum number of redirects (`settings.max_redirects`)
- Full Unicode URL support
- Support for protocol-less redirects.
- Allow for arbitrary request types.
- Bugfixes
0.6.0 (2011-08-17)
------------------
- New callback hook system
- New persistent sessions object and context manager
- Transparent Dict-cookie handling
- Status code reference object
- Removed Response.cached
- Added Response.request
- All args are kwargs
- Relative redirect support
- HTTPError handling improvements
- Improved https testing
- Bugfixes
0.5.1 (2011-07-23)
------------------
- International Domain Name Support!
- Access headers without fetching entire body (`read()`)
- Use lists as dicts for parameters
- Add Forced Basic Authentication
- Forced Basic is default authentication type
- `python-requests.org` default User-Agent header
- CaseInsensitiveDict lower-case caching
- Response.history bugfix
0.5.0 (2011-06-21)
------------------
- PATCH Support
- Support for Proxies
- HTTPBin Test Suite
- Redirect Fixes
- settings.verbose stream writing
- Querystrings for all methods
- URLErrors (Connection Refused, Timeout, Invalid URLs) are treated as
explicitly raised
`r.requests.get('hwe://blah'); r.raise_for_status()`
0.4.1 (2011-05-22)
------------------
- Improved Redirection Handling
- New 'allow\_redirects' param for following non-GET/HEAD Redirects
- Settings module refactoring
0.4.0 (2011-05-15)
------------------
- Response.history: list of redirected responses
- Case-Insensitive Header Dictionaries!
- Unicode URLs
0.3.4 (2011-05-14)
------------------
- Urllib2 HTTPAuthentication Recursion fix (Basic/Digest)
- Internal Refactor
- Bytes data upload Bugfix
0.3.3 (2011-05-12)
------------------
- Request timeouts
- Unicode url-encoded data
- Settings context manager and module
0.3.2 (2011-04-15)
------------------
- Automatic Decompression of GZip Encoded Content
- AutoAuth Support for Tupled HTTP Auth
0.3.1 (2011-04-01)
------------------
- Cookie Changes
- Response.read()
- Poster fix
0.3.0 (2011-02-25)
------------------
- Automatic Authentication API Change
- Smarter Query URL Parameterization
- Allow file uploads and POST data together
-
New Authentication Manager System
: - Simpler Basic HTTP System
- Supports all built-in urllib2 Auths
- Allows for custom Auth Handlers
0.2.4 (2011-02-19)
------------------
- Python 2.5 Support
- PyPy-c v1.4 Support
- Auto-Authentication tests
- Improved Request object constructor
0.2.3 (2011-02-15)
------------------
-
New HTTPHandling Methods
: - Response.\_\_nonzero\_\_ (false if bad HTTP Status)
- Response.ok (True if expected HTTP Status)
- Response.error (Logged HTTPError if bad HTTP Status)
- Response.raise\_for\_status() (Raises stored HTTPError)
0.2.2 (2011-02-14)
------------------
- Still handles request in the event of an HTTPError. (Issue \#2)
- Eventlet and Gevent Monkeypatch support.
- Cookie Support (Issue \#1)
0.2.1 (2011-02-14)
------------------
- Added file attribute to POST and PUT requests for multipart-encode
file uploads.
- Added Request.url attribute for context and redirects
0.2.0 (2011-02-14)
------------------
- Birth!
0.0.1 (2011-02-13)
------------------
- Frustration
- Conception
| /requests_freeproxy-2.31.0.tar.gz/requests_freeproxy-2.31.0/HISTORY.md | 0.829906 | 0.803444 | HISTORY.md | pypi |
r"""
The ``codes`` object defines a mapping from common names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
items.
Example::
>>> import requests
>>> requests.codes['temporary_redirect']
307
>>> requests.codes.teapot
418
>>> requests.codes['\o/']
200
Some codes have multiple names, and both upper- and lower-case versions of
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
``codes.okay`` all correspond to the HTTP status code 200.
"""
from .structures import LookupDict
# Canonical table mapping each HTTP status code to the tuple of names it is
# reachable under on the ``codes`` object (the first entry is the preferred
# name). Names beginning with '\' or '/' are ASCII-art aliases and do not
# receive an upper-case variant when the attributes are generated.
_codes = {
    # Informational.
    100: ("continue",),
    101: ("switching_protocols",),
    102: ("processing",),
    103: ("checkpoint",),
    122: ("uri_too_long", "request_uri_too_long"),
    200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
    201: ("created",),
    202: ("accepted",),
    203: ("non_authoritative_info", "non_authoritative_information"),
    204: ("no_content",),
    205: ("reset_content", "reset"),
    206: ("partial_content", "partial"),
    207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
    208: ("already_reported",),
    226: ("im_used",),
    # Redirection.
    300: ("multiple_choices",),
    301: ("moved_permanently", "moved", "\\o-"),
    302: ("found",),
    303: ("see_other", "other"),
    304: ("not_modified",),
    305: ("use_proxy",),
    306: ("switch_proxy",),
    307: ("temporary_redirect", "temporary_moved", "temporary"),
    308: (
        "permanent_redirect",
        "resume_incomplete",
        "resume",
    ), # "resume" and "resume_incomplete" to be removed in 3.0
    # Client Error.
    400: ("bad_request", "bad"),
    401: ("unauthorized",),
    402: ("payment_required", "payment"),
    403: ("forbidden",),
    404: ("not_found", "-o-"),
    405: ("method_not_allowed", "not_allowed"),
    406: ("not_acceptable",),
    407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
    408: ("request_timeout", "timeout"),
    409: ("conflict",),
    410: ("gone",),
    411: ("length_required",),
    412: ("precondition_failed", "precondition"),
    413: ("request_entity_too_large",),
    414: ("request_uri_too_large",),
    415: ("unsupported_media_type", "unsupported_media", "media_type"),
    416: (
        "requested_range_not_satisfiable",
        "requested_range",
        "range_not_satisfiable",
    ),
    417: ("expectation_failed",),
    418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
    421: ("misdirected_request",),
    422: ("unprocessable_entity", "unprocessable"),
    423: ("locked",),
    424: ("failed_dependency", "dependency"),
    425: ("unordered_collection", "unordered"),
    426: ("upgrade_required", "upgrade"),
    # NOTE: "precondition" also appears under 412; since attributes are set
    # in dict order, ``codes.precondition`` ends up resolving to 428.
    428: ("precondition_required", "precondition"),
    429: ("too_many_requests", "too_many"),
    431: ("header_fields_too_large", "fields_too_large"),
    444: ("no_response", "none"),
    449: ("retry_with", "retry"),
    450: ("blocked_by_windows_parental_controls", "parental_controls"),
    451: ("unavailable_for_legal_reasons", "legal_reasons"),
    499: ("client_closed_request",),
    # Server Error.
    500: ("internal_server_error", "server_error", "/o\\", "✗"),
    501: ("not_implemented",),
    502: ("bad_gateway",),
    503: ("service_unavailable", "unavailable"),
    504: ("gateway_timeout",),
    505: ("http_version_not_supported", "http_version"),
    506: ("variant_also_negotiates",),
    507: ("insufficient_storage",),
    509: ("bandwidth_limit_exceeded", "bandwidth"),
    510: ("not_extended",),
    511: ("network_authentication_required", "network_auth", "network_authentication"),
}

# Attribute-based lookup object populated from ``_codes`` by ``_init()``.
codes = LookupDict(name="status_codes")
def _init():
    """Expose every status name as an attribute of ``codes`` and append a
    reference table of all codes to the module docstring."""
    for value, names in _codes.items():
        for name in names:
            setattr(codes, name, value)
            # ASCII-art aliases (leading '\' or '/') get no upper-case twin.
            if not name.startswith(("\\", "/")):
                setattr(codes, name.upper(), value)

    def _bullet(value):
        joined = ", ".join("``%s``" % n for n in _codes[value])
        return f"* {value}: {joined}"

    global __doc__
    # Skip docstring augmentation when running under ``python -OO``.
    if __doc__ is not None:
        __doc__ = __doc__ + "\n" + "\n".join(_bullet(v) for v in sorted(_codes))


_init()
from . import sessions
def request(method, url, **kwargs):
    """Construct a :class:`Request <Request>`, send it, and return the
    :class:`Response <Response>`.

    :param method: HTTP verb for the new :class:`Request` object: ``GET``,
        ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) query-string payload: dict, list of tuples or bytes.
    :param data: (optional) body payload: dict, list of tuples, bytes, or
        file-like object.
    :param json: (optional) JSON-serializable Python object for the body.
    :param headers: (optional) dict of HTTP headers to send.
    :param cookies: (optional) dict or CookieJar object to send.
    :param files: (optional) dict of ``'name': file-like-objects`` (or
        ``{'name': file-tuple}``) for multipart encoding upload, where a
        ``file-tuple`` is ``('filename', fileobj)``,
        ``('filename', fileobj, 'content_type')`` or
        ``('filename', fileobj, 'content_type', custom_headers)`` —
        ``'content_type'`` being the file's content type and
        ``custom_headers`` a dict-like object of extra headers for the file.
    :param auth: (optional) auth tuple for Basic/Digest/Custom HTTP auth.
    :param timeout: (optional) how long to wait for the server to send data
        before giving up: a single float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) follow GET/OPTIONS/POST/PUT/PATCH/
        DELETE/HEAD redirects. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) dict mapping protocol to proxy URL.
    :param verify: (optional) either a boolean toggling TLS certificate
        verification, or a path string to a CA bundle. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response body is downloaded
        immediately.
    :param cert: (optional) path to an SSL client cert file (.pem), or a
        ``('cert', 'key')`` tuple.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> requests.request('GET', 'https://httpbin.org/get')
      <Response [200]>
    """
    # A dedicated session scoped to this single call guarantees the
    # underlying sockets are closed on exit — otherwise they can trigger
    # ResourceWarnings or look like a memory leak.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
    r"""Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) query-string payload: dict, list of tuples
        or bytes.
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("get", url, params=params, **kwargs)
def options(url, **kwargs):
    r"""Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("options", url, **kwargs)
def head(url, **kwargs):
    r"""Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
        Unlike the default :meth:`request` behavior, ``allow_redirects``
        defaults to ``False`` here.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    if "allow_redirects" not in kwargs:
        # HEAD is a metadata probe; don't chase redirects unless asked.
        kwargs["allow_redirects"] = False
    return request("head", url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Send a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) body payload: dict, list of tuples, bytes, or
        file-like object.
    :param json: (optional) JSON-serializable Python object for the body.
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("post", url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
    r"""Send a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) body payload: dict, list of tuples, bytes, or
        file-like object.
    :param json: (optional) JSON-serializable Python object for the body
        (forwarded through ``**kwargs``).
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("put", url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Send a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) body payload: dict, list of tuples, bytes, or
        file-like object.
    :param json: (optional) JSON-serializable Python object for the body
        (forwarded through ``**kwargs``).
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("patch", url, data=data, **kwargs)
def delete(url, **kwargs):
    r"""Send a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: any other arguments accepted by :func:`request`.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("delete", url, **kwargs)
from collections import OrderedDict
from .compat import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
    """A ``dict``-like container whose string keys compare case-insensitively.

    The full ``MutableMapping`` interface is implemented, plus ``copy`` and
    ``lower_items``. The casing of the *most recently set* key is what
    ``iter(instance)``, ``keys()`` and ``items()`` report, while lookup and
    containment ignore case entirely::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']              # True

    So ``headers['content-encoding']`` finds a ``'Content-Encoding'`` header
    no matter how it was originally spelled. Behavior is undefined if the
    constructor, ``.update``, or an equality comparison receives two keys
    whose ``.lower()`` forms collide.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (originally-cased key, value).
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Index by the lowercased form, but remember the caller's casing.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        _, value = self._store[key.lower()]
        return value

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        for cased_key, _ in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Iterate ``(lowercased_key, value)`` pairs, like ``items()``."""
        for lower_key, (_, value) in self._store.items():
            yield lower_key, value

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Normalize both sides to lowercase keys before comparing.
        other_ci = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other_ci.lower_items())

    def copy(self):
        # ``_store.values()`` yields (cased_key, value) 2-tuples, which the
        # constructor accepts as an iterable of key/value pairs.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """A ``dict`` subclass whose item access reads instance attributes.

    ``ld[key]`` looks the key up in the instance ``__dict__`` — i.e. among
    attributes set on the object — rather than in the dict storage, and a
    missing key yields ``None`` instead of raising ``KeyError``.
    """

    def __init__(self, name=None):
        super().__init__()
        self.name = name  # label used only in repr()

    def __repr__(self):
        return "<lookup '%s'>" % (self.name,)

    def __getitem__(self, key):
        # Fall through silently: absent keys default to None.
        return self.__dict__.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
import requests
import ftplib
import base64
from requests.compat import urlparse
from requests.hooks import dispatch_hook
from requests import Response, codes
from io import BytesIO
import cgi
import os
import socket
from requests.exceptions import ConnectionError, ConnectTimeout, ReadTimeout
from requests.exceptions import RequestException
from requests.utils import prepend_scheme_if_needed
class FTPSession(requests.Session):
    # A requests.Session subclass with the FTP transport adapter mounted
    # for ftp:// URLs, plus one helper method per supported FTP verb.
    def __init__(self):
        super(FTPSession, self).__init__()
        self.mount('ftp://', FTPAdapter())

    # Define our helper methods.
    def list(self, url, **kwargs):
        '''Sends an FTP LIST. Returns a Response object.'''
        return self.request('LIST', url, **kwargs)

    def retr(self, url, **kwargs):
        '''Sends an FTP RETR for a given url. Returns a Response object whose
        content field contains the binary data.'''
        return self.request('RETR', url, **kwargs)

    def stor(self, url, files=None, **kwargs):
        '''Sends an FTP STOR to a given URL. Returns a Response object. Expects
        to be given one file by the standard Requests method. The remote
        filename will be given by the URL provided.'''
        return self.request('STOR', url, files=files, **kwargs)

    def nlst(self, url, **kwargs):
        '''Sends an FTP NLST. Returns a Response object.'''
        return self.request('NLST', url, **kwargs)

    def size(self, url, **kwargs):
        '''Sends an FTP SIZE. Returns a decimal number.'''
        return self.request('SIZE', url, **kwargs)
def monkeypatch_session():
    '''Replace ``requests.Session`` with :class:`FTPSession` so every
    session created afterwards understands FTP URLs as well.'''
    requests.Session = FTPSession
def parse_multipart_files(request):
    '''Given a prepared request, return a file-like object containing the
    original data. This is pretty hacky.

    :param request: a PreparedRequest whose body is multipart/form-data.
    :return: a ``BytesIO`` positioned at offset 0 with the first file's bytes.
    '''
    # Start by grabbing the pdict.
    _, pdict = cgi.parse_header(request.headers['Content-Type'])

    # BUGFIX: on Python 3, cgi.parse_multipart requires the boundary as
    # bytes, while cgi.parse_header returns it as str.
    boundary = pdict.get('boundary')
    if isinstance(boundary, str):
        pdict['boundary'] = boundary.encode('ascii')

    # Now, wrap the multipart data in a BytesIO buffer. This is annoying.
    buf = BytesIO()
    buf.write(request.body)
    buf.seek(0)

    # Parse the data. Simply take the first file.
    data = cgi.parse_multipart(buf, pdict)
    _, filedata = data.popitem()
    buf.close()

    # Get a BytesIO now, and write the file into it.
    # BUGFIX: file parts come back as bytes on Python 3, so they must be
    # joined with a bytes separator (''.join raised TypeError).
    buf = BytesIO()
    buf.write(b''.join(filedata))
    buf.seek(0)

    return buf
def data_callback_factory(variable):
    '''Returns a callback suitable for use by the FTP library. This callback
    will repeatedly save data into the variable provided to this function. This
    variable should be a file-like structure.'''
    def callback(data):
        variable.write(data)
        # Track the running byte count so the response can expose a
        # Content-Length header later.
        variable.content_len = getattr(variable, "content_len", 0) + len(data)

    return callback
def build_text_response(request, data, code):
    '''Build a response for data known to be ascii text.'''
    return build_response(request, data, code, 'ascii')
def build_binary_response(request, data, code):
    '''Build a response for data of unknown encoding (no decoding applied).'''
    return build_response(request, data, code, None)
def build_response(request, data, code, encoding):
    '''Build a ``requests.Response`` from the data returned by ftplib.

    :param request: the originating PreparedRequest.
    :param data: a file-like object holding the body.
    :param code: ftplib status string, e.g. ``'226 Transfer complete'``.
    :param encoding: encoding to record on the response (may be None).
    '''
    response = Response()

    # Basic bookkeeping.
    response.encoding = encoding
    response.raw = data
    response.url = request.url
    response.request = request

    # The leading token of an ftplib status line is the numeric code.
    response.status_code = int(code.split()[0])

    if hasattr(data, "content_len"):
        response.headers['Content-Length'] = str(data.content_len)

    # Rewind the raw file-like object so consumers read from the start.
    response.raw.seek(0)

    # Run any registered response hooks and hand back the (possibly
    # replaced) response.
    return dispatch_hook('response', request.hooks, response)
class FTPAdapter(requests.adapters.BaseAdapter):
    '''A Requests Transport Adapter that handles FTP urls.'''

    def __init__(self):
        super(FTPAdapter, self).__init__()

        # Dispatch table mapping the (upper-case) methods we support to
        # the bound functions that implement them.
        self.func_table = {'LIST': self.list,
                           'RETR': self.retr,
                           'STOR': self.stor,
                           'NLST': self.nlst,
                           'SIZE': self.size,
                           'HEAD': self.head,
                           'GET': self.get}

    def send(self, request, **kwargs):
        '''Sends a PreparedRequest object over FTP. Returns a response object.
        '''
        # Get the authentication from the prepared request, if any.
        auth = self.get_username_password_from_header(request)

        # Next, get the host and the path.
        scheme, host, port, path = self.get_host_and_path_from_url(request)

        # Sort out the timeout.
        timeout = kwargs.get('timeout', None)

        # Look for a proxy; if there is one for this scheme, tunnel the
        # request through it as a plain HTTP request instead.
        proxies = kwargs.get('proxies', {})
        proxy = proxies.get(scheme)
        if proxy:
            return self.send_proxy(request, proxy, **kwargs)

        # Establish the connection and login if needed.
        self.conn = ftplib.FTP()

        # Use a flag to distinguish read vs connection timeouts, and a flat set
        # of except blocks instead of a nested try-except, because python 3
        # exception chaining makes things weird
        connected = False
        try:
            self.conn.connect(host, port, timeout)
            connected = True
            if auth is not None:
                self.conn.login(auth[0], auth[1])
            else:
                self.conn.login()

            # Get the method and attempt to find the function to call.
            resp = self.func_table[request.method](path, request)
        except socket.timeout as e:
            # requests distinguishes between connection timeouts and others
            if connected:
                raise ReadTimeout(e, request=request)
            else:
                raise ConnectTimeout(e, request=request)
        # ftplib raises EOFError if the connection is unexpectedly closed.
        # Convert that or any other socket error to a ConnectionError.
        except (EOFError, socket.error) as e:
            raise ConnectionError(e, request=request)
        # Raised for 5xx errors. FTP uses 550 for both ENOENT and EPERM type
        # errors, so just translate all of these into a http-ish 404
        except ftplib.error_perm as e:
            # The exception message is probably from the server, so if it's
            # non-ascii, who knows what the encoding is. Latin1 has the
            # advantage of not being able to fail.
            resp = build_text_response(
                request, BytesIO(str(e).encode('latin1')),
                str(codes.not_found))
        # 4xx reply, translate to a http 503
        except ftplib.error_temp as e:
            resp = build_text_response(
                request, BytesIO(str(e).encode('latin1')),
                str(codes.unavailable))
        # error_reply is an unexpected status code, and error_proto is an
        # invalid status code. Error is the generic ftplib error, usually
        # raised when a line is too long. Translate all of them to a generic
        # RequestException
        except (ftplib.error_reply, ftplib.error_proto, ftplib.Error) as e:
            raise RequestException(e, request=request)

        # Return the response.
        return resp

    def close(self):
        '''Dispose of any internal state.'''
        # Currently this is a no-op.
        pass

    def send_proxy(self, request, proxy, **kwargs):
        '''Send a FTP request through a HTTP proxy.'''
        # Direct the request through a HTTP adapter instead.
        proxy_url = prepend_scheme_if_needed(proxy, 'http')
        s = requests.Session()
        adapter = s.get_adapter(proxy_url)
        try:
            return adapter.send(request, **kwargs)
        finally:
            adapter.close()

    def list(self, path, request):
        '''Executes the FTP LIST command on the given path.'''
        data = BytesIO()

        # To ensure the BytesIO object gets cleaned up, we need to alias its
        # close method to the release_conn() method. This is a dirty hack, but
        # there you go.
        data.release_conn = data.close

        self.conn.cwd(path)
        code = self.conn.retrbinary('LIST', data_callback_factory(data))

        # When that call has finished executing, we'll have all our data.
        response = build_text_response(request, data, code)

        # Close the connection.
        self.conn.close()

        return response

    def retr(self, path, request):
        '''Executes the FTP RETR command on the given path.'''
        data = BytesIO()

        # To ensure the BytesIO gets cleaned up, we need to alias its close
        # method. See self.list().
        data.release_conn = data.close

        code = self.conn.retrbinary('RETR ' + path, data_callback_factory(data))
        response = build_binary_response(request, data, code)

        # Close the connection.
        self.conn.close()

        return response

    def get(self, path, request):
        '''Executes the FTP RETR command on the given path.

        This is the same as retr except that the FTP server code is
        converted to a HTTP 200.
        '''
        response = self.retr(path, request)

        # Errors are handled in send(), so assume everything is ok if we
        # made it this far.
        response.status_code = codes.ok
        return response

    def size(self, path, request):
        '''Executes the FTP SIZE command on the given path.

        Returns None when the server reply is not a number.
        '''
        self.conn.voidcmd('TYPE I')  # SIZE is not usually allowed in ASCII mode

        size = self.conn.size(path)
        if not str(size).isdigit():
            self.conn.close()
            return None

        # BUGFIX: ``bytes(size)`` on Python 3 builds a zero-filled buffer of
        # ``size`` bytes; the body should be the decimal text of the size.
        data = BytesIO(str(size).encode('ascii'))

        # To ensure the BytesIO gets cleaned up, we need to alias its close
        # method to the release_conn() method. This is a dirty hack, but there
        # you go.
        data.release_conn = data.close
        data.content_len = size

        response = build_text_response(request, data, '213')
        self.conn.close()
        return response

    def head(self, path, request):
        '''Executes the FTP SIZE command on the given path.

        This is the same as size except that the FTP server code is
        converted to a HTTP 200.
        '''
        # NOTE(review): if size() returned None (non-numeric reply), the
        # attribute assignment below raises AttributeError — pre-existing
        # behaviour, kept as-is.
        response = self.size(path, request)
        response.status_code = codes.ok
        return response

    def stor(self, path, request):
        '''Executes the FTP STOR command on the given path.'''
        # First, get the file handle. We assume (bravely)
        # that there is only one file to be sent to a given URL. We also
        # assume that the filename is sent as part of the URL, not as part of
        # the files argument. Both of these assumptions are rarely correct,
        # but they are easy.
        data = parse_multipart_files(request)

        # Split into the path and the filename.
        path, filename = os.path.split(path)

        # Switch directories and upload the data.
        self.conn.cwd(path)
        code = self.conn.storbinary('STOR ' + filename, data)

        # Close the connection and build the response.
        self.conn.close()
        response = build_binary_response(request, BytesIO(), code)
        return response

    def nlst(self, path, request):
        '''Executes the FTP NLST command on the given path.'''
        data = BytesIO()

        # Alias the close method.
        data.release_conn = data.close

        self.conn.cwd(path)
        code = self.conn.retrbinary('NLST', data_callback_factory(data))

        # When that call has finished executing, we'll have all our data.
        response = build_text_response(request, data, code)

        # Close the connection.
        self.conn.close()

        return response

    def get_username_password_from_header(self, request):
        '''Given a PreparedRequest object, reverse the process of adding HTTP
        Basic auth to obtain the username and password. Allows the FTP adapter
        to piggyback on the basic auth notation without changing the control
        flow. Returns a (username, password) tuple, or None without auth.'''
        auth_header = request.headers.get('Authorization')
        if auth_header:
            # The basic auth header is of the form 'Basic xyz'. We want the
            # second part. Check that we have the right kind of auth though.
            encoded_components = auth_header.split()[:2]
            if encoded_components[0] != 'Basic':
                raise AuthError('Invalid form of Authentication used.')
            else:
                encoded = encoded_components[1]

            # Decode the base64 encoded string.
            decoded = base64.b64decode(encoded)

            # The auth string was encoded to bytes by requests using latin1,
            # and will be encoded to bytes by ftplib (in python 3) using
            # latin1. In the meantime, use a str
            decoded = decoded.decode('latin1')

            # The string is of the form 'username:password'.
            # BUGFIX: split on the first colon only — RFC 7617 allows the
            # password itself to contain ':' characters.
            components = decoded.split(':', 1)
            username = components[0]
            password = components[1]
            return (username, password)
        else:
            # No auth header. Return None.
            return None

    def get_host_and_path_from_url(self, request):
        '''Given a PreparedRequest object, split the URL in such a manner as to
        determine the host and the path. This is a separate method to wrap some
        of urlparse's craziness. Returns (scheme, host, port, path).'''
        url = request.url
        parsed = urlparse(url)
        scheme = parsed.scheme
        path = parsed.path

        # If there is a slash on the front of the path, chuck it.
        if path.startswith('/'):
            path = path[1:]

        host = parsed.hostname
        port = parsed.port or 0

        return (scheme, host, port, path)
class AuthError(Exception):
    '''Raised when the Authorization header is malformed.'''
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from functools import partial
from logging import getLogger
from pickle import PickleError, dumps
from requests import Session
from requests.adapters import DEFAULT_POOLSIZE, HTTPAdapter
def wrap(self, sup, background_callback, *args_, **kwargs_):
    """A global top-level is required for ProcessPoolExecutor.

    Runs ``sup`` and then the (deprecated) ``background_callback``; if
    the callback returns a truthy replacement it is used, otherwise the
    original response is returned.
    """
    response = sup(*args_, **kwargs_)
    replacement = background_callback(self, response)
    return replacement or response
# Message for the RuntimeError raised when a request function cannot be
# pickled for dispatch to a ProcessPoolExecutor.
PICKLE_ERROR = (
    'Cannot pickle function. Refer to documentation: https://'
    'github.com/ross/requests-futures/#using-processpoolexecutor'
)
class FuturesSession(Session):
    def __init__(
        self,
        executor=None,
        max_workers=8,
        session=None,
        adapter_kwargs=None,
        *args,
        **kwargs
    ):
        """Creates a FuturesSession

        Notes
        ~~~~~
        * `ProcessPoolExecutor` may be used with Python > 3.4;
          see README for more information.
        * If you provide both `executor` and `max_workers`, the latter is
          ignored and provided executor is used as is.
        """
        _adapter_kwargs = {}
        super(FuturesSession, self).__init__(*args, **kwargs)
        # Remember whether this session created the executor itself;
        # close() only shuts down executors it owns.
        self._owned_executor = executor is None
        if executor is None:
            executor = ThreadPoolExecutor(max_workers=max_workers)
            # set connection pool size equal to max_workers if needed
            if max_workers > DEFAULT_POOLSIZE:
                _adapter_kwargs.update(
                    {
                        'pool_connections': max_workers,
                        'pool_maxsize': max_workers,
                    }
                )

        _adapter_kwargs.update(adapter_kwargs or {})

        if _adapter_kwargs:
            self.mount('https://', HTTPAdapter(**_adapter_kwargs))
            self.mount('http://', HTTPAdapter(**_adapter_kwargs))

        self.executor = executor
        self.session = session

    def request(self, *args, **kwargs):
        """Maintains the existing api for Session.request.

        Used by all of the higher level methods, e.g. Session.get.

        The background_callback param allows you to do some processing on the
        response in the background, e.g. call resp.json() so that json parsing
        happens in the background thread.

        :rtype : concurrent.futures.Future
        """
        if self.session:
            func = self.session.request
        else:
            # avoid calling super to not break pickled method
            func = partial(Session.request, self)

        background_callback = kwargs.pop('background_callback', None)
        if background_callback:
            logger = getLogger(self.__class__.__name__)
            logger.warning(
                '`background_callback` is deprecated and will be '
                'removed in 1.0, use `hooks` instead'
            )
            func = partial(wrap, self, func, background_callback)

        if isinstance(self.executor, ProcessPoolExecutor):
            # verify function can be pickled before handing it to a
            # separate process
            try:
                dumps(func)
            except (TypeError, PickleError):
                raise RuntimeError(PICKLE_ERROR)

        return self.executor.submit(func, *args, **kwargs)

    def close(self):
        """Close the session and shut down the executor if we own it."""
        super(FuturesSession, self).close()
        if self._owned_executor:
            self.executor.shutdown()

    def get(self, url, **kwargs):
        r"""
        Sends a GET request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).get(url, **kwargs)

    def options(self, url, **kwargs):
        r"""Sends a OPTIONS request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).options(url, **kwargs)

    def head(self, url, **kwargs):
        r"""Sends a HEAD request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).head(url, **kwargs)

    def post(self, url, data=None, json=None, **kwargs):
        r"""Sends a POST request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).post(
            url, data=data, json=json, **kwargs
        )

    def put(self, url, data=None, **kwargs):
        r"""Sends a PUT request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).put(url, data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        r"""Sends a PATCH request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).patch(url, data=data, **kwargs)

    def delete(self, url, **kwargs):
        r"""Sends a DELETE request. Returns :class:`Future` object.

        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype : concurrent.futures.Future
        """
        return super(FuturesSession, self).delete(url, **kwargs)
from .sessions import Session
def request(method, url, **kwargs):
    """Construct and send a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object: ``GET``,
        ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects``
        (or ``{'name': file-tuple}``) for multipart encoding upload, where
        ``file-tuple`` is a 2-, 3- or 4-tuple of filename, fileobj, content
        type and custom headers.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send
        data before giving up, as a float or a ``(connect, read)`` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable redirection.
        Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of
        the proxy.
    :param verify: (optional) Boolean controlling TLS certificate
        verification, or a path to a CA bundle. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be
        immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :param tls_config: (optional) Dictionary of tls-client config params.
        If None, a random TLS fingerprint is used. Default None.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

        >>> import requests_go
        >>> req = requests_go.request('GET', 'https://httpbin.org/get')
        >>> req
        <Response [200]>
    """
    # A throw-away session, closed by the context manager, ensures no
    # sockets linger (they trigger ResourceWarnings and can look like a
    # memory leak).
    with Session() as session:
        response = session.request(method=method, url=url, **kwargs)
    return response
def get(url, params=None, **kwargs):
    r"""Send a GET request.

    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs["params"] = params
    return request("get", url, **kwargs)
def options(url, **kwargs):
    r"""Send an OPTIONS request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("options", url, **kwargs)
def head(url, **kwargs):
    r"""Send a HEAD request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes. If
        ``allow_redirects`` is not provided it defaults to ``False``
        (unlike the default :meth:`request` behaviour).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    if "allow_redirects" not in kwargs:
        kwargs["allow_redirects"] = False
    return request("head", url, **kwargs)
def post(url, data=None, json=None, **kwargs):
    r"""Send a POST request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs.update(data=data, json=json)
    return request("post", url, **kwargs)
def put(url, data=None, **kwargs):
    r"""Send a PUT request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes (``json``
        may be passed through here).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs["data"] = data
    return request("put", url, **kwargs)
def patch(url, data=None, **kwargs):
    r"""Send a PATCH request.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes (``json``
        may be passed through here).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs["data"] = data
    return request("patch", url, **kwargs)
def delete(url, **kwargs):
    r"""Send a DELETE request.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("delete", url, **kwargs)
from typing import MutableMapping, Mapping
from collections import OrderedDict
class CaseInsensitiveDict(MutableMapping):
    """Origin: requests library (https://github.com/psf/requests)

    A ``dict``-like container with case-insensitive string keys.

    Implements the full ``MutableMapping`` interface plus dict's ``copy``
    and an extra ``lower_items``. The casing of the most recently set key
    is remembered, so iteration (``keys()``, ``items()``, ...) yields
    cased keys while lookups, deletion, and membership tests ignore case::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']              # True

    E.g. ``headers['content-encoding']`` returns the value of a
    ``'Content-Encoding'`` response header regardless of how the name
    was originally stored. Behaviour is undefined when the constructor,
    ``update``, or equality comparisons receive distinct keys that
    collide after ``.lower()``.
    """

    def __init__(self, data=None, **kwargs):
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Index by the lowercased key but remember the cased spelling
        # alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        _cased, value = self._store[key.lower()]
        return value

    def __delitem__(self, key):
        self._store.pop(key.lower())

    def __iter__(self):
        for cased_key, _value in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        for lower_key, (_cased, value) in self._store.items():
            yield lower_key, value

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare insensitively via lowered-key snapshots.
        other_ci = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other_ci.lower_items())

    # Copy is required
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
from .extensions import TLSExtensions, HTTP2Settings
class TLSConfig:
    """Container for a Go tls-client fingerprint configuration.

    Supports dict-style item access for the attribute names listed in
    ``self._keys`` and serialises to Go-style CamelCase keys via
    :meth:`toJSON`.
    """

    def __init__(self):
        super(TLSConfig, self).__init__()
        # Attribute names serialised by toJSON(), in output order.
        self._keys = [
            "ja3",
            "headers_order",
            "force_http1",
            "pseudo_header_order",
            "tls_extensions",
            "http2_settings",
        ]
        self.ja3: str = None                    # TLS ja3 fingerprint value
        self.headers_order: list[str] = None    # HTTP header order
        self.force_http1: bool = False          # force HTTP/1.1 requests
        # HTTP/2 pseudo-header order, e.g.
        # [":method", ":authority", ":scheme", ":path"]
        self.pseudo_header_order: list[str] = [
            ":method",
            ":authority",
            ":scheme",
            ":path",
        ]
        self.tls_extensions: TLSExtensions = TLSExtensions()   # TLS extensions
        self.http2_settings: HTTP2Settings = HTTP2Settings()   # HTTP/2 settings

    def __str__(self):
        return str(self.toJSON())

    def __iter__(self):
        for name in self._keys:
            yield name, getattr(self, name)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __getitem__(self, item):
        return getattr(self, item)

    def __delitem__(self, key):
        # "Deleting" resets to None so serialisation keeps every key.
        setattr(self, key, None)

    def __delattr__(self, item):
        setattr(self, item, None)

    # Load attributes from a JSON-style dict (unknown keys are ignored).
    def _fromJSON(self, config: dict):
        for key, value in config.items():
            if key in self._keys:
                setattr(self, key, value)

    # Serialise to a dict with Go-style CamelCase keys; the tokens "tls",
    # "http1" and "http2" are fully upper-cased (HeadersOrder, ForceHTTP1,
    # TLSExtensions, HTTP2Settings, ...).
    def toJSON(self):
        result = {}
        for key in self._keys:
            go_key = "".join(
                part.upper() if part in ("tls", "http2", "http1") else part.title()
                for part in key.split("_")
            )
            if key in ("tls_extensions", "http2_settings"):
                result[go_key] = getattr(self, key).toJSON()
            else:
                result[go_key] = getattr(self, key)
        return result
from .config import TLSConfig
# English-form identifiers for X.509 certificate digital-signature
# algorithms: keys are TLS signature-scheme names, values appear to be
# Go crypto/tls SignatureScheme identifiers — TODO confirm against the
# Go side of the client.
TLS1_3_Identifier = {
    'rsa_pkcs1_sha512': 'PKCS1WithSHA512',
    'rsa_pkcs1_sha384': 'PKCS1WithSHA384',
    'rsa_pkcs1_sha256': 'PKCS1WithSHA256',
    'rsa_pkcs1_sha224': 'PKCS1WithSHA224',
    'rsa_pkcs1_sha1': 'PKCS1WithSHA1',
    'dsa_sha512': 'DSAWithSHA512',
    'dsa_sha384': 'DSAWithSHA384',
    'dsa_sha256': 'DSAWithSHA256',
    'dsa_sha224': 'DSAWithSHA224',
    'dsa_sha1': 'DSAWithSHA1',
    'ecdsa_sha512': 'ECDSAWithSHA512',
    'ecdsa_sha384': 'ECDSAWithSHA384',
    'ecdsa_sha256': 'ECDSAWithSHA256',
    'ecdsa_sha224': 'ECDSAWithSHA224',
    'ecdsa_sha1': 'ECDSAWithSHA1',
    'rsa_pss_rsae_sha512': 'PSSWithSHA512',
    'rsa_pss_rsae_sha384': 'PSSWithSHA384',
    'rsa_pss_rsae_sha256': 'PSSWithSHA256',
    'rsa_pss_rsae_sha224': 'PSSWithSHA224',
    'rsa_pss_rsae_sha1': 'PSSWithSHA1',
    'ecdsa_secp521r1_sha512': 'ECDSAWithP521AndSHA512',
    'ecdsa_secp384r1_sha384': 'ECDSAWithP384AndSHA384',
    'ecdsa_secp256r1_sha256': 'ECDSAWithP256AndSHA256',
    'ecdsa_secp224r1_sha224': 'ECDSAWithP224AndSHA224',
}
def to_tls_config(config: dict) -> TLSConfig:
    """Build a :class:`TLSConfig` from a browser TLS-fingerprint capture dict."""
    cfg = TLSConfig()

    cfg.ja3 = get_ja3_string(config)
    cfg.headers_order = get_header_order(config)
    cfg.force_http1 = get_force_http1(config)
    cfg.pseudo_header_order = get_pseudo_header_order(config)

    # TLS extension settings (mutated in place on the nested object).
    ext = cfg.tls_extensions
    ext.supported_signature_algorithms = get_supported_signature_algorithms(config)
    ext.cert_compression_algo = get_cert_compression_algo(config)
    ext.record_size_limit = get_record_size_limit(config)
    ext.supported_delegated_credentials_algorithms = (
        get_supported_delegated_credentials_algorithms(config)
    )
    ext.supported_versions = get_supported_versions(config)
    ext.psk_key_exchange_modes = get_psk_key_exchange_modes(config)
    ext.signature_algorithms_cert = get_signature_algorithms_cert(config)
    ext.key_share_curves = get_key_share_curves(config)
    ext.not_used_grease = get_not_used_grease(config)

    # HTTP/2 settings.
    h2 = cfg.http2_settings
    h2.settings = get_h2_settings(config)
    h2.settings_order = get_h2_settings_order(config)
    h2.connection_flow = get_connection_flow(config)
    h2.header_priority = get_header_priority(config)
    h2.priority_frames = get_priority_frames(config)

    return cfg
def get_ja3_string(config):
    """Return the ja3 fingerprint string from a capture dict."""
    return config["tls"]["ja3"]
def get_header_order(config):
    """Return the ordered names of the regular (non-pseudo) headers from
    the first HTTP/2 HEADERS frame that was sent."""
    raw_headers = []
    for frame in config["http2"]["sent_frames"]:
        if frame["frame_type"] == "HEADERS":
            raw_headers = frame["headers"]
            break

    ordered = {}
    for line in raw_headers:
        if line[0] == ":":
            # Pseudo headers are handled by get_pseudo_header_order().
            continue
        name, value = line.split(":", 1)
        ordered[name.strip()] = value.strip()
    return list(ordered)
def get_force_http1(config):
    """True when the capture did not negotiate HTTP/2 ("h2")."""
    return config["http_version"] != "h2"
def get_pseudo_header_order(config):
    """Return the HTTP/2 pseudo-header names (``":method"``, ...) in the
    order they appeared in the first sent HEADERS frame."""
    raw_headers = []
    for frame in config["http2"]["sent_frames"]:
        if frame["frame_type"] == "HEADERS":
            raw_headers = frame["headers"]
            break

    ordered = {}
    for line in raw_headers:
        if line[0] != ":":
            continue
        # BUGFIX: split at most twice so values containing ':' survive,
        # e.g. ':authority: host:443' or a ':path' holding a URL; the old
        # unbounded split crashed on the 2-tuple unpack.
        name, value = line.split(":", 2)[1:]
        ordered[":" + name.strip()] = value.strip()
    return list(ordered)
def get_supported_signature_algorithms(config):
    """Collect every advertised signature algorithm across all extensions;
    None when nothing is advertised."""
    algorithms = [
        algorithm
        for extension in config["tls"]["extensions"]
        for algorithm in (extension.get("signature_algorithms") or [])
    ]
    return algorithms or None
def get_cert_compression_algo(config):
    """Return certificate-compression algorithm names, or None when the
    extension is absent (or lists no algorithms)."""
    algos = None
    for extension in config["tls"]["extensions"]:
        if "compress_certificate" not in extension["name"]:
            continue
        for entry in extension["algorithms"]:
            if algos is None:
                algos = []
            # Entries look like 'brotli (2)'; keep only the name part.
            algos.append(entry.split("(", 1)[0].strip())
    return algos
def get_record_size_limit(config):
    """Return the record_size_limit extension value, or None if absent."""
    limits = [
        int(extension["data"])
        for extension in config["tls"]["extensions"]
        if "record_size_limit" in extension["name"]
    ]
    # Mirror the original loop: the last occurrence wins.
    return limits[-1] if limits else None
def get_supported_delegated_credentials_algorithms(config):
    """Collect delegated-credentials signature hash algorithms across all
    extensions; None when nothing is advertised."""
    algorithms = [
        algorithm
        for extension in config["tls"]["extensions"]
        for algorithm in (extension.get("signature_hash_algorithms") or [])
    ]
    return algorithms or None
def get_supported_versions(config):
    """Normalize the supported_versions extension into bare version strings.

    Each entry is stripped of a "TLS_" or "TLS " prefix and of any
    trailing "(...)" annotation, e.g. "TLS 1.3 (0x0304)" -> "1.3".
    Returns None when the extension is absent or empty.
    """
    versions = []
    for ext in config["tls"]["extensions"]:
        if "supported_versions" not in ext["name"]:
            continue
        for raw in ext["versions"]:
            name = raw
            if "TLS_" in name:
                name = name.split("TLS_", 1)[-1]
            elif "TLS " in name:
                name = name.split("TLS ", 1)[-1]
            versions.append(name.split("(", 1)[0].strip())
    return versions or None
def get_psk_key_exchange_modes(config):
    """Map PSK key-exchange mode extensions onto tls-client mode names.

    A mode string ending in "(0)" (psk_ke) maps to "PskModePlain";
    anything else maps to "PskModeDHE". Returns None when no
    "psk_key_exchange_modes" extension is present.
    """
    modes = None
    for ext in config["tls"]["extensions"]:
        if "psk_key_exchange_modes" not in ext["name"]:
            continue
        if modes is None:
            modes = []
        mode = ext.get("PSK_Key_Exchange_Mode", "")
        if mode:
            modes.append("PskModePlain" if mode.endswith("(0)") else "PskModeDHE")
    return modes
# Cannot be implemented: the captured config does not expose this data.
def get_signature_algorithms_cert(config):
    """Placeholder for the TLS signature_algorithms_cert extension.

    The source fingerprint data does not carry this information, so the
    function intentionally does nothing (implicitly returns None).
    """
    pass
def get_key_share_curves(config):
    """Return normalized key_share curve names, or None.

    The first key of each shared-key dict is cleaned of a "TLS_" prefix,
    a trailing "(...)" annotation, surrounding whitespace and dashes,
    e.g. "TLS_GREASE (0x3a3a)" -> "GREASE", "X25519 (29)" -> "X25519".
    """
    curves = []
    for ext in config["tls"]["extensions"]:
        if "key_share" not in ext["name"]:
            continue
        for shared in ext["shared_keys"]:
            raw = next(iter(shared))
            name = raw.split("TLS_", 1)[-1].split("(", 1)[0].strip().replace("-", "")
            curves.append(name)
    return curves or None
def get_not_used_grease(config):
    """Return True when the first TLS extension is not a GREASE placeholder.

    Clients that use GREASE put a "TLS_GREASE" extension first; only the
    first extension's name is inspected.
    """
    first_name = config["tls"]["extensions"][0]["name"]
    return "TLS_GREASE" not in first_name
def get_h2_settings(config):
    """Parse "KEY = value" pairs out of the sent HTTP/2 SETTINGS frames.

    Later SETTINGS frames overwrite earlier keys. Returns the resulting
    dict, or None when no settings were captured.
    """
    parsed = {}
    for frame in config["http2"]["sent_frames"]:
        if frame["frame_type"] != "SETTINGS":
            continue
        for entry in frame["settings"]:
            name, raw_value = entry.split("=", 1)
            parsed[name.strip()] = int(raw_value.strip())
    return parsed or None
def get_h2_settings_order(config):
    """Return the HTTP/2 SETTINGS keys in the order they were sent.

    Fixed: get_h2_settings() returns None when no SETTINGS frame was
    captured, which previously made ``list(settings.keys())`` raise
    TypeError. Now returns None in that case.
    """
    settings = get_h2_settings(config)
    if settings is None:
        return None
    return list(settings.keys())
def get_connection_flow(config):
    """Return the increment of the first sent WINDOW_UPDATE frame, or None."""
    for frame in config["http2"]["sent_frames"]:
        if frame["frame_type"] == "WINDOW_UPDATE":
            return frame["increment"]
    return None
def get_header_priority(config):
    """Return the priority block of the first HEADERS frame that has one.

    HEADERS frames without a "priority" entry are skipped (scanning
    continues); returns None when no frame carries a priority.
    """
    for frame in config["http2"]["sent_frames"]:
        if frame["frame_type"] != "HEADERS":
            continue
        priority = frame.get("priority")
        if not priority:
            continue
        return {
            "weight": priority["weight"],
            "streamDep": priority["depends_on"],
            "exclusive": bool(priority["exclusive"]),
        }
    return None
def get_priority_frames(config):
    """Convert every sent PRIORITY frame into a tls-client priorityFrames
    entry, preserving order. Returns None when no PRIORITY frame was sent.
    """
    frames = []
    for frame in config["http2"]["sent_frames"]:
        if frame["frame_type"] != "PRIORITY":
            continue
        priority = frame["priority"]
        frames.append({
            "streamID": frame["stream_id"],
            "priorityParam": {
                "weight": priority["weight"],
                "streamDep": priority["depends_on"],
                "exclusive": bool(priority["exclusive"]),
            },
        })
    return frames or None
"""HTTPS adapter to close connections with expired client certificates."""
from datetime import datetime, timedelta
from functools import partial
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_pem_x509_certificate
from requests.packages.urllib3.connection import HTTPSConnection
from requests.packages.urllib3.connectionpool import (HTTPConnectionPool,
HTTPSConnectionPool)
from requests.adapters import HTTPAdapter
_backend = default_backend()
def load_x509_certificate(filename):
    """Read a PEM file from disk and parse it into an X.509 certificate.

    Parameters
    ----------
    filename : str
        Path of the PEM-encoded certificate file.

    Returns
    -------
    cert : cryptography.x509.Certificate
        The parsed certificate.
    """
    with open(filename, 'rb') as cert_file:
        pem_data = cert_file.read()
    return load_pem_x509_certificate(pem_data, _backend)
class _CertReloadingHTTPSConnection(HTTPSConnection):
    """HTTPS connection that remembers its client certificate's expiry so
    the pool can discard it before the certificate becomes invalid."""
    def __init__(self, host, cert_reload_timeout=0, **kwargs):
        # cert_reload_timeout: treat the certificate as expired this many
        # seconds before its actual notAfter time.
        super(_CertReloadingHTTPSConnection, self).__init__(host, **kwargs)
        # Unknown until connect() reads the certificate file; datetime.max
        # guarantees a fresh connection never looks expired.
        self._not_valid_after = datetime.max
        self._reload_timeout = timedelta(seconds=cert_reload_timeout)
    @property
    def cert_has_expired(self):
        """True when the certificate expires within the reload timeout."""
        expires = self._not_valid_after - datetime.utcnow()
        return expires <= self._reload_timeout
    def connect(self):
        # Re-read the certificate file on every (re)connect so a renewed
        # certificate on disk is picked up without restarting the process.
        if self.cert_file:
            cert = load_x509_certificate(self.cert_file)
            self._not_valid_after = cert.not_valid_after
        super(_CertReloadingHTTPSConnection, self).connect()
class _CertReloadingHTTPSConnectionPool(HTTPSConnectionPool):
    """Connection pool that closes (rather than reuses) pooled connections
    whose client certificate has expired or is about to expire."""
    ConnectionCls = _CertReloadingHTTPSConnection
    def __init__(self, host, port=None, cert_reload_timeout=0, **kwargs):
        super(_CertReloadingHTTPSConnectionPool, self).__init__(
            host, port=port, **kwargs)
        # conn_kw is forwarded by urllib3 to every connection this pool
        # creates, so each one receives the reload timeout.
        self.conn_kw['cert_reload_timeout'] = cert_reload_timeout
    def _get_conn(self, timeout=None):
        """Pop connections until one with a still-valid certificate is found."""
        while True:
            conn = super(_CertReloadingHTTPSConnectionPool, self)._get_conn(
                timeout)
            # Note: this loop is guaranteed to terminate because, even if the
            # pool is completely drained, when we create a new connection, its
            # `_not_valid_after` property is set to `datetime.max`, and the
            # condition below will evaluate to `True`.
            if not conn.cert_has_expired:
                return conn
            conn.close()
class CertReloadingHTTPAdapter(HTTPAdapter):
    """A mixin for :class:`requests.Session` that automatically reloads the
    client X.509 certificate when the copy held by the session is close to
    expiring.

    Parameters
    ----------
    cert_reload_timeout : int
        Reload the certificate if it expires within this many seconds from now.
    """
    def __init__(self, cert_reload_timeout=0, **kwargs):
        super(CertReloadingHTTPAdapter, self).__init__(**kwargs)
        # Bind the timeout into the HTTPS pool class so every pool created
        # by the pool manager discards soon-to-expire connections.
        pool_cls = partial(
            _CertReloadingHTTPSConnectionPool,
            cert_reload_timeout=cert_reload_timeout)
        self.poolmanager.pool_classes_by_scheme = {
            'http': HTTPConnectionPool,
            'https': pool_cls,
        }
import json
import pathlib
from cgi import parse_header
from collections import OrderedDict
from datetime import datetime
from http import HTTPStatus
from typing import Any
from urllib.parse import parse_qsl, urlsplit, urlparse, ParseResult
from http.cookiejar import Cookie
from requests import PreparedRequest, Response, __version__ as requests_version
from requests_har import __version__
def has_http_only(cookie: Cookie) -> bool:
    """Return True when the cookie carries an HttpOnly flag.

    http.cookiejar stores non-standard cookie attributes in the private
    ``_rest`` dict; the flag name is matched case-insensitively.
    Fixed: the function now returns False (it previously fell off the end
    and returned None despite the ``-> bool`` annotation) and tolerates a
    missing/None ``_rest`` instead of raising TypeError.
    """
    extra_args = vars(cookie).get('_rest') or {}
    return any(key.lower() == 'httponly' for key in extra_args)
def get_charset(headers: dict) -> str:
    """Extract the charset parameter from a Content-Type header.

    Defaults to utf-8 when the header or parameter is missing.
    NOTE(review): ``cgi.parse_header`` always returns a (value, params)
    2-tuple, so the ``len(parsed) == 1`` branch is dead code; also the
    ``cgi`` module is deprecated since Python 3.11 and removed in 3.13 —
    ``email.message.Message`` is the suggested replacement.
    """
    header = headers.get('Content-Type', 'application/json; charset=utf-8')
    parsed = parse_header(header)
    if len(parsed) == 1:
        return 'utf-8'
    return parsed[1].get('charset', 'utf-8')
def format_query(url: str) -> list[dict[str, str]]:
    """Decompose a URL's query string into HAR "queryString" entries."""
    pairs = parse_qsl(urlsplit(url).query)
    return [{'name': name, 'value': value, 'comment': ''}
            for name, value in pairs]
def format_cookie(cookie: Cookie):
    """Serialize a cookie into the HAR "cookies" entry format.

    Fixed: session cookies carry ``expires = None``, which previously
    crashed ``datetime.fromtimestamp``; they now serialize with
    ``'expires': None``.
    """
    expires = cookie.expires
    return {
        'name': cookie.name,
        'value': cookie.value,
        'path': cookie.path,
        'domain': cookie.domain,
        # ISO-8601 local time, or None for session cookies.
        'expires': datetime.fromtimestamp(expires).isoformat() if expires is not None else None,
        'secure': cookie.secure,
        'comment': cookie.comment,
    }
def format_header(name: str, value: str) -> dict[str, str]:
    """Build a single HAR header entry (comment is always empty)."""
    return dict(name=name, value=value, comment='')
def format_post_data(request: PreparedRequest):
    """Render a prepared request body as a HAR "postData" object.

    Byte bodies are decoded using the charset from the Content-Type
    header; bodies that cannot be decoded become an empty string.
    """
    payload = request.body
    if isinstance(payload, bytes):
        try:
            payload = payload.decode(get_charset(request.headers))
        except UnicodeDecodeError:
            payload = ''
    return {
        'mimeType': request.headers.get('Content-Type', 'application/json'),
        'params': [],
        'text': payload,
    }
def get_header_size(headers: dict[str, str]) -> int:
    """Approximate the header block size as newline-joined "name: value" lines."""
    lines = [f'{name}: {value}' for name, value in headers.items()]
    return len('\n'.join(lines))
def format_response_content(response: Response) -> dict[str, int | str]:
    """Render a response body as a HAR "content" object.

    Byte bodies are decoded with the charset taken from the response's
    Content-Type header.
    """
    body = response.content
    if isinstance(body, bytes):
        body = body.decode(get_charset(response.headers))
    return {
        'size': len(response.content) if response.content is not None else -1,
        'mimeType': response.headers['Content-Type'],
        'text': body,
        'comment': '',
    }
def format_request(request: PreparedRequest, http_version: str) -> dict[str, Any]:
    """Serialize a prepared request into a HAR 1.2 "request" object.

    ``postData`` is only attached when the request has a body.
    """
    data = {
        "method": request.method,
        "url": request.url,
        "httpVersion": http_version,
        # NOTE(review): reads the private ``PreparedRequest._cookies`` jar;
        # requests exposes no public accessor for it.
        "cookies": [format_cookie(cookie) for cookie in request._cookies],
        "headers": [
            format_header(name, value)
            for name, value in request.headers.items()
        ],
        "queryString": format_query(request.url),
        "headersSize": get_header_size(request.headers),
        # -1 is the HAR convention for "unknown / no body".
        "bodySize": len(request.body) if request.body is not None else -1,
        "comment": ""
    }
    if request.body:
        data['postData'] = format_post_data(request)
    return data
def format_response(response: Response, http_version: str) -> dict[str, Any]:
    """Serialize a response into a HAR 1.2 "response" object.

    NOTE(review): ``HTTPStatus(response.status_code)`` raises ValueError
    for non-standard status codes (e.g. 599) — confirm whether such codes
    can reach this point.
    """
    data = {
        "status": response.status_code,
        "statusText": HTTPStatus(response.status_code).name,
        "httpVersion": http_version,
        "cookies": [format_cookie(cookie) for cookie in response.cookies],
        "headers": [
            format_header(name, value)
            for name, value in response.headers.items()
        ],
        "content": format_response_content(response),
        "redirectURL": response.headers.get('Location', ''),
        "headersSize": get_header_size(response.headers),
        # -1 is the HAR convention for "unknown / no body".
        "bodySize": len(response.content) if response.content is not None else -1,
        "comment": ""
    }
    return data
class HarDict(dict):
    '''
    Dictionary tailored to hold requests and responses
    in order to later save it as an HTTP ARchive (HAR 1.2) file.
    '''
    def __init__(self, *a, **kw):
        super().__init__(*a, **kw)
        self['log'] = {
            'version': '1.2',
            'creator': {
                "name": "requests-har",
                "version": __version__,
            },
            'browser': {
                "name": "requests",
                "version": requests_version,
            },
            'pages': [],
            'entries': [],
        }
        # Timestamp embedded in the generated filename.
        # Fixed: the format string was "%Y%-m-%d_%H-%M-%S", a typo for
        # "%Y-%m-%d_%H-%M-%S"; "%-m" is a glibc-only directive that here
        # produced garbled names (year fused to month) and raises
        # ValueError on platforms without it (e.g. Windows).
        self.created_at = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # Set on the first recorded response (see on_response).
        self.filename: str | None = None
    def on_response(
        self, response: Response, timeout: int | None = None,
        verify: bool = True, proxies: OrderedDict = OrderedDict(),
        stream: bool = False, cert: str | None = None,
    ):
        '''
        Method designed to be used as a response hook
        for the python requests library.
        On response, save the contents of the prepared request and the
        associated response to the HarDict object.
        '''
        # Derive the output filename from the first response seen.
        if self.filename is None and response.request.url is not None:
            request_url: ParseResult = urlparse(response.request.url)
            self.filename = f'{self.created_at}_{request_url.netloc}.har'
        now = datetime.utcnow().isoformat(timespec='milliseconds')
        http_version: str = {
            10: "HTTP/1.0", 11: "HTTP/1.1"
        }.get(response.raw.version, 'HTTP/1.1')
        entry = {
            "startedDateTime": now,
            "time": 0,
            "request": format_request(response.request, http_version),
            "response": format_response(response, http_version),
            "cache": {
                "beforeRequest": None,
                "afterRequest": None,
            },
            "timings": {
                "send": 0,
                "wait": 0,
            },
            # Non-standard ("_"-prefixed) HAR fields recording the
            # transport options requests was invoked with.
            "_timeout": timeout,
            "_verify": verify,
            "_proxies": dict(proxies),
            "_stream": stream,
            "_cert": cert
        }
        self['log']['entries'].append(entry)
    def save(self, path: pathlib.Path):
        '''Saves the contents of this dict to the disk as JSON.'''
        # Fixed: saving before any response was recorded previously crashed
        # with an opaque ``TypeError: path / None``; raise a clear error.
        if self.filename is None:
            raise ValueError('Cannot save HAR file: no response has been recorded')
        filepath: pathlib.Path = path / self.filename
        filepath.parent.mkdir(parents=True, exist_ok=True)
        with open(filepath, 'w') as filepointer:
            json.dump(self, filepointer, indent=2)
import binascii
import codecs
import hashlib
import hmac
import math
from six.moves import xrange
from six.moves.urllib.parse import urlparse
from six import text_type
import mohawk
from mohawk.base import EmptyValue
from requests.auth import AuthBase
class HawkAuth(AuthBase):
    """Handles authentication using Hawk.
    :param hawk_session:
        The hawk session, from the server, encoded as hexadecimal.
        You don't need to set this parameter if you already know the hawk
        credentials (Optional).
    :param id:
        The hawk id string to use for authentication (Optional).
    :param key:
        A string containing the hawk secret key (Optional).
    :param algorithm:
        A string containing the name of the algorithm to be used.
        (Optional, defaults to 'sha256').
    :param server_url:
        The url of the server, this is useful for hawk when signing the requests.
        In case this is omitted, fallbacks to the value of the "Host" header of
        the request (Optional).
    :param ext:
        A string of arbitrary data to be sent along with the request (Optional).
    Note that the `hawk_session` and `id` parameters are mutually exclusive.
    You should use either `hawk_session` or both `id` and 'key'.
    """
    def __init__(self, hawk_session=None, id=None, key=None, algorithm='sha256',
                 credentials=None, server_url=None, _timestamp=None,
                 always_hash_content=True, ext=None, app=None):
        # `credentials` was removed from the public API; fail loudly so old
        # callers migrate instead of silently losing their credentials.
        if credentials is not None:
            raise AttributeError("The 'credentials' param has been removed. "
                                 "Pass 'id' and 'key' instead, or '**credentials_dict'.")
        # hawk_session and id/key are mutually exclusive, and id/key must
        # be supplied together.
        if (hawk_session and (id or key)
                or not hawk_session and not (id and key)):
            raise AttributeError("You should pass either 'hawk_session' "
                                 "or both 'id' and 'key'.")
        if hawk_session:
            try:
                hawk_session = codecs.decode(hawk_session, 'hex_codec')
            except binascii.Error as e:
                raise TypeError(e)
            # Derive the (id, key) pair from the session token with HKDF,
            # using the Firefox Accounts session-token derivation label.
            keyInfo = 'identity.mozilla.com/picl/v1/sessionToken'
            keyMaterial = HKDF(hawk_session, "", keyInfo, 32*2)
            id = codecs.encode(keyMaterial[:32], "hex_codec")
            key = codecs.encode(keyMaterial[32:64], "hex_codec")
        self.credentials = {
            'id': id,
            'key': key,
            'algorithm': algorithm
        }
        self._timestamp = _timestamp
        # Only the host:port part of server_url is kept; when set it
        # overrides the request's Host header before signing (__call__).
        self.host = urlparse(server_url).netloc if server_url else None
        self.always_hash_content = always_hash_content
        self.ext = ext
        self.app = app
    def __call__(self, r):
        # requests calls this with the PreparedRequest; return it with the
        # Hawk Authorization header attached.
        if self.host is not None:
            r.headers['Host'] = self.host
        content_type = r.headers.get("Content-Type") or ""
        if not isinstance(content_type, text_type):
            content_type = content_type.decode("utf-8")
        # mohawk distinguishes "no content" (EmptyValue) from an empty
        # string, so map falsy body/content-type to EmptyValue.
        sender = mohawk.Sender(
            self.credentials,
            r.url,
            r.method,
            content=r.body or EmptyValue,
            content_type=content_type or EmptyValue,
            always_hash_content=self.always_hash_content,
            _timestamp=self._timestamp,
            ext=self.ext,
            app=self.app
        )
        r.headers['Authorization'] = sender.request_header
        return r
def HKDF_extract(salt, IKM, hashmod=hashlib.sha256):
    """HKDF-Extract; see RFC-5869 for the details.

    Returns PRK = HMAC-Hash(salt, IKM) as bytes.
    """
    # Per the RFC, a missing salt is a string of HashLen zero bytes.
    if salt is None:
        salt = b"\x00" * hashmod().digest_size
    if isinstance(salt, text_type):
        salt = salt.encode("utf-8")
    return hmac.new(salt, IKM, hashmod).digest()
def HKDF_expand(PRK, info, L, hashmod=hashlib.sha256):
    """HKDF-Expand; see RFC-5869 for the details.

    :param PRK: pseudorandom key produced by HKDF_extract (bytes).
    :param info: optional application context (str or bytes).
    :param L: number of output bytes to produce.
    :param hashmod: hash constructor (defaults to SHA-256).
    """
    if isinstance(info, text_type):
        info = info.encode("utf-8")
    digest_size = hashmod().digest_size
    # The RFC's block counter is a single octet, hence at most 255 blocks.
    N = int(math.ceil(L * 1.0 / digest_size))
    assert N <= 255
    T = b""
    output = []
    for i in xrange(1, N + 1):
        # T(i) = HMAC-Hash(PRK, T(i-1) || info || single octet i).
        # Fixed: the counter used to be appended as chr(i).encode("utf-8"),
        # which emits two bytes for i > 127 and deviates from RFC 5869.
        # bytes(bytearray([i])) yields one octet on both Python 2 and 3.
        data = T + info + bytes(bytearray([i]))
        T = hmac.new(PRK, data, hashmod).digest()
        output.append(T)
    return b"".join(output)[:L]
def HKDF(secret, salt, info, size, hashmod=hashlib.sha256):
    """HKDF-extract-and-expand as a single function.

    Derives ``size`` bytes of key material from ``secret`` using the
    two-step extract/expand construction of RFC 5869.
    """
    PRK = HKDF_extract(salt, secret, hashmod)
    return HKDF_expand(PRK, info, size, hashmod)
# If httpie is installed, register the hawk plugin.
try:
    from httpie.plugins import AuthPlugin
    class HawkPlugin(AuthPlugin):
        """httpie authentication plugin enabling ``--auth-type hawk``."""
        name = 'Hawk Auth'
        auth_type = 'hawk'
        description = ''
        def get_auth(self, username, password):
            # An empty password means the "username" is actually a hex hawk
            # session token; otherwise username/password are the id and key.
            if password == '':
                return HawkAuth(hawk_session=username)
            return HawkAuth(id=username, key=password)
except ImportError:
    pass
from requests_html import HTMLResponse, _Find, _XPath
from typing import Text, List, Union, Mapping
import sys
# Sanity checking.
# NOTE(review): asserts are stripped under ``python -O``; an explicit
# version comparison would be more robust here.
try:
    assert sys.version_info.major == 3
    assert sys.version_info.minor > 5
except AssertionError:
    raise RuntimeError('TPFD requires Python 3.6+!')
# Typing
# Union of everything a Macro rule lookup can hand to its callback.
_ParseResponse = Union[List, Mapping, _Find, _XPath]
class Rule:
    """Immutable binding of a selector/template string to a callback.

    ``rule_type`` selects the requests-html lookup used by Macro.parse:
    'search' (parse template), 'find' (CSS selector) or 'xpath'.
    """
    def __init__(self, rule: Text, func: callable, rule_type: Text, clean: bool = False, first: bool = False) -> None:
        self._pattern = rule
        self._callback = func
        self._kind = rule_type
        self._only_first = first
        self._sanitize = clean
    @property
    def rule(self) -> Text:
        """The selector string or parse template."""
        return self._pattern
    @property
    def function(self) -> callable:
        """The callback invoked with the lookup result."""
        return self._callback
    @property
    def rule_type(self) -> Text:
        """One of 'search', 'find' or 'xpath'."""
        return self._kind
    @property
    def first(self) -> bool:
        """Whether only the first match is requested."""
        return self._only_first
    @property
    def clean(self) -> bool:
        """Whether <script>/<style> tags are stripped from matches."""
        return self._sanitize
class Macro:
    """Collects parsing rules via decorators and applies them to a response.

    Rules are registered through :meth:`search_pattern`,
    :meth:`css_selector` or :meth:`xpath` and executed in registration
    order by :meth:`parse`; each rule's callback receives the (non-None)
    result of the corresponding requests-html lookup.
    """
    def __init__(self, response: HTMLResponse) -> None:
        self.debug = False
        self._rules = []
        self._response = response
    @property
    def rules(self) -> List[Rule]:
        """The rules registered so far, in registration order."""
        return self._rules
    @property
    def response(self) -> HTMLResponse:
        # Fixed: previously returned the nonexistent ``self._result``,
        # which raised AttributeError on every access.
        return self._response
    @response.setter
    def response(self, response: HTMLResponse) -> None:
        self._response = response
    def search_pattern(self, template: Text, first: bool = False) -> callable:
        """
        Decorator for parse search pattern
        """
        def parse_decorator(func: callable) -> callable:
            r = Rule(rule=template, func=func, rule_type='search', first=first)
            self._rules.append(r)
            return func
        return parse_decorator
    def css_selector(self, selector: Text, first: bool = False, clean: bool = False) -> callable:
        """
        Decorator for css selector rules.
        """
        def find_decorator(func: callable) -> callable:
            r = Rule(rule=selector, func=func, rule_type='find', first=first, clean=clean)
            self._rules.append(r)
            return func
        return find_decorator
    def xpath(self, selector: str, first: bool = False, clean: bool = False) -> callable:
        """
        Decorator for xpath selector rules.
        """
        def xpath_decorator(func: callable) -> callable:
            r = Rule(rule=selector, func=func, rule_type='xpath', first=first, clean=clean)
            self._rules.append(r)
            return func
        return xpath_decorator
    def parse(self) -> _ParseResponse:
        """Run every registered rule against the stored response."""
        for rule in self._rules:
            # Fixed: rule types are now compared with ``==`` instead of the
            # identity operator ``is``, which is not guaranteed to hold for
            # strings and triggers a SyntaxWarning on Python 3.8+.
            if rule.rule_type == 'search':
                if rule.first:
                    result = self._response.html.search(rule.rule)
                else:
                    result = self._response.html.search_all(rule.rule)
            elif rule.rule_type == 'find':
                result = self._response.html.find(selector=rule.rule, first=rule.first, clean=rule.clean)
            elif rule.rule_type == 'xpath':
                result = self._response.html.xpath(selector=rule.rule, first=rule.first, clean=rule.clean)
            else:
                continue
            if result is not None:
                rule.function(result)
import sys
import asyncio
from urllib.parse import urlparse, urlunparse, urljoin
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
from functools import partial
from typing import Set, Union, List, MutableMapping, Optional
import pyppeteer
import requests
from pyquery import PyQuery
from fake_useragent import UserAgent
from lxml.html.clean import Cleaner
import lxml
from lxml import etree
from lxml.html import HtmlElement
from lxml.html import tostring as lxml_html_tostring
from lxml.html.soupparser import fromstring as soup_parse
from parse import search as parse_search
from parse import findall, Result
from w3lib.encoding import html_to_unicode
DEFAULT_ENCODING = 'utf-8'
DEFAULT_URL = 'https://example.org/'
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8'
DEFAULT_NEXT_SYMBOL = ['next', 'more', 'older']
# Shared lxml Cleaner used when find()/xpath() are called with clean=True:
# it strips <script> and <style> content from the returned HTML.
cleaner = Cleaner()
cleaner.javascript = True
cleaner.style = True
# NOTE(review): presumably a lazily-created fake_useragent.UserAgent cache,
# populated elsewhere in this module — not visible in this chunk.
useragent = None
# Typing.
_Find = Union[List['Element'], 'Element']
_XPath = Union[List[str], List['Element'], str, 'Element']
_Result = Union[List['Result'], 'Result']
_HTML = Union[str, bytes]
_BaseHTML = str
_UserAgent = str
_DefaultEncoding = str
_URL = str
_RawHTML = bytes
_Encoding = str
_LXML = HtmlElement
_Text = str
_Search = Result
_Containing = Union[str, List[str]]
_Links = Set[str]
_Attrs = MutableMapping
_Next = Union['HTML', List[str]]
_NextSymbol = List[str]
# Sanity checking.
# NOTE(review): asserts are stripped under ``python -O``; an explicit
# version comparison would be more robust.
try:
    assert sys.version_info.major == 3
    assert sys.version_info.minor > 5
except AssertionError:
    raise RuntimeError('Requests-HTML requires Python 3.6+!')
class MaxRetries(Exception):
    """Raised when a page fails to render after the configured retries."""
    def __init__(self, message):
        # Fixed: forward the message to Exception.__init__ so str(exc) and
        # exc.args carry it (previously both were empty).
        super().__init__(message)
        self.message = message
class BaseParser:
    """A basic HTML/Element Parser, for Humans.
    :param element: The element from which to base the parsing upon.
    :param default_encoding: Which encoding to default to.
    :param html: HTML from which to base the parsing upon (optional).
    :param url: The URL from which the HTML originated, used for ``absolute_links``.
    """
    def __init__(self, *, element, default_encoding: _DefaultEncoding = None, html: _HTML = None, url: _URL) -> None:
        self.element = element
        self.url = url
        # Anchor-only hrefs ("#section") are skipped by ``links`` by default.
        self.skip_anchors = True
        self.default_encoding = default_encoding
        self._encoding = None
        # Raw HTML is always held as bytes; str input is encoded up front.
        self._html = html.encode(DEFAULT_ENCODING) if isinstance(html, str) else html
        # Lazily-built lxml / PyQuery representations (see ``lxml`` / ``pq``).
        self._lxml = None
        self._pq = None
    @property
    def raw_html(self) -> _RawHTML:
        """Bytes representation of the HTML content.
        (`learn more <http://www.diveintopython3.net/strings.html>`_).
        """
        if self._html:
            return self._html
        else:
            # No stored HTML: serialize the underlying element instead.
            return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
    @property
    def html(self) -> _BaseHTML:
        """Unicode representation of the HTML content
        (`learn more <http://www.diveintopython3.net/strings.html>`_).
        """
        if self._html:
            return self.raw_html.decode(self.encoding, errors='replace')
        else:
            return etree.tostring(self.element, encoding='unicode').strip()
    @html.setter
    def html(self, html: str) -> None:
        """Property setter for self.html; stores the encoded bytes."""
        self._html = html.encode(self.encoding)
    @raw_html.setter
    def raw_html(self, html: bytes) -> None:
        """Property setter for self.html."""
        self._html = html
    @property
    def encoding(self) -> _Encoding:
        """The encoding string to be used, extracted from the HTML and
        :class:`HTMLResponse <HTMLResponse>` headers.
        """
        # Cached once detected (or explicitly set via the setter).
        if self._encoding:
            return self._encoding
        # Scan meta tags for charset.
        if self._html:
            self._encoding = html_to_unicode(self.default_encoding, self._html)[0]
            # Fall back to requests' detected encoding if decode fails.
            # (self.encoding here re-enters this property, but _encoding is
            # already set above, so it returns immediately.)
            try:
                self.raw_html.decode(self.encoding, errors='replace')
            except UnicodeDecodeError:
                self._encoding = self.default_encoding
        return self._encoding if self._encoding else self.default_encoding
    @encoding.setter
    def encoding(self, enc: str) -> None:
        """Property setter for self.encoding."""
        self._encoding = enc
    @property
    def pq(self) -> PyQuery:
        """`PyQuery <https://pythonhosted.org/pyquery/>`_ representation
        of the :class:`Element <Element>` or :class:`HTML <HTML>`.
        """
        # Built lazily on first access and cached.
        if self._pq is None:
            self._pq = PyQuery(self.lxml)
        return self._pq
    @property
    def lxml(self) -> HtmlElement:
        """`lxml <http://lxml.de>`_ representation of the
        :class:`Element <Element>` or :class:`HTML <HTML>`.
        """
        if self._lxml is None:
            # Prefer the lenient soupparser; fall back to lxml's own parser
            # when soupparser rejects the input with ValueError.
            try:
                self._lxml = soup_parse(self.html, features='html.parser')
            except ValueError:
                self._lxml = lxml.html.fromstring(self.raw_html)
        return self._lxml
    @property
    def text(self) -> _Text:
        """The text content of the
        :class:`Element <Element>` or :class:`HTML <HTML>`.
        """
        return self.pq.text()
    @property
    def full_text(self) -> _Text:
        """The full text content (including links) of the
        :class:`Element <Element>` or :class:`HTML <HTML>`.
        """
        return self.lxml.text_content()
    def find(self, selector: str = "*", *, containing: _Containing = None, clean: bool = False, first: bool = False, _encoding: str = None) -> _Find:
        """Given a CSS Selector, returns a list of
        :class:`Element <Element>` objects or a single one.
        :param selector: CSS Selector to use.
        :param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
        :param containing: If specified, only return elements that contain the provided text.
        :param first: Whether or not to return just the first result.
        :param _encoding: The encoding format.
        Example CSS Selectors:
        - ``a``
        - ``a.someClass``
        - ``a#someID``
        - ``a[target=_blank]``
        See W3School's `CSS Selectors Reference
        <https://www.w3schools.com/cssref/css_selectors.asp>`_
        for more details.
        If ``first`` is ``True``, only returns the first
        :class:`Element <Element>` found.
        """
        # Convert a single containing into a list.
        if isinstance(containing, str):
            containing = [containing]
        encoding = _encoding or self.encoding
        elements = [
            Element(element=found, url=self.url, default_encoding=encoding)
            for found in self.pq(selector)
        ]
        if containing:
            elements_copy = elements.copy()
            elements = []
            for element in elements_copy:
                # Case-insensitive substring match against the full text.
                if any([c.lower() in element.full_text.lower() for c in containing]):
                    elements.append(element)
            # NOTE(review): filtered matches come back in reverse document
            # order — confirm whether callers rely on this.
            elements.reverse()
        # Sanitize the found HTML.
        if clean:
            elements_copy = elements.copy()
            elements = []
            for element in elements_copy:
                element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
                elements.append(element)
        return _get_first_or_list(elements, first)
    def xpath(self, selector: str, *, clean: bool = False, first: bool = False, _encoding: str = None) -> _XPath:
        """Given an XPath selector, returns a list of
        :class:`Element <Element>` objects or a single one.
        :param selector: XPath Selector to use.
        :param clean: Whether or not to sanitize the found HTML of ``<script>`` and ``<style>`` tags.
        :param first: Whether or not to return just the first result.
        :param _encoding: The encoding format.
        If a sub-selector is specified (e.g. ``//a/@href``), a simple
        list of results is returned.
        See W3School's `XPath Examples
        <https://www.w3schools.com/xml/xpath_examples.asp>`_
        for more details.
        If ``first`` is ``True``, only returns the first
        :class:`Element <Element>` found.
        """
        selected = self.lxml.xpath(selector)
        # Attribute/text sub-selections come back as unicode results and are
        # returned as plain strings rather than wrapped in Element.
        elements = [
            Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)
            if not isinstance(selection, etree._ElementUnicodeResult) else str(selection)
            for selection in selected
        ]
        # Sanitize the found HTML.
        if clean:
            elements_copy = elements.copy()
            elements = []
            for element in elements_copy:
                element.raw_html = lxml_html_tostring(cleaner.clean_html(element.lxml))
                elements.append(element)
        return _get_first_or_list(elements, first)
    def search(self, template: str) -> Result:
        """Search the :class:`Element <Element>` for the given Parse template.
        :param template: The Parse template to use.
        """
        return parse_search(template, self.html)
    def search_all(self, template: str) -> _Result:
        """Search the :class:`Element <Element>` (multiple times) for the given parse
        template.
        :param template: The Parse template to use.
        """
        return [r for r in findall(template, self.html)]
    @property
    def links(self) -> _Links:
        """All found links on page, in as–is form."""
        def gen():
            for link in self.find('a'):
                try:
                    href = link.attrs['href'].strip()
                    # Skip empty hrefs, bare anchors (when skip_anchors is
                    # set) and javascript:/mailto: pseudo-links.
                    if href and not (href.startswith('#') and self.skip_anchors) and not href.startswith(('javascript:', 'mailto:')):
                        yield href
                except KeyError:
                    pass
        return set(gen())
    def _make_absolute(self, link):
        """Makes a given link absolute."""
        # Parse the link with stdlib.
        parsed = urlparse(link)._asdict()
        # If link is relative, then join it with base_url.
        if not parsed['netloc']:
            return urljoin(self.base_url, link)
        # Link is absolute; if it lacks a scheme, add one from base_url.
        if not parsed['scheme']:
            parsed['scheme'] = urlparse(self.base_url).scheme
        # Reconstruct the URL to incorporate the new scheme.
        parsed = (v for v in parsed.values())
        return urlunparse(parsed)
        # NOTE(review): the two lines below are unreachable — the return
        # above is unconditional, so already-complete links also take the
        # (no-op) urlparse/urlunparse round trip.
        # Link is absolute and complete with scheme; nothing to be done here.
        return link
    @property
    def absolute_links(self) -> _Links:
        """All found links on page, in absolute form
        (`learn more <https://www.navegabem.com/absolute-or-relative-links.html>`_).
        """
        def gen():
            for link in self.links:
                yield self._make_absolute(link)
        return set(gen())
    @property
    def base_url(self) -> _URL:
        """The base URL for the page. Supports the ``<base>`` tag
        (`learn more <https://www.w3schools.com/tags/tag_base.asp>`_)."""
        # Support for <base> tag.
        base = self.find('base', first=True)
        if base:
            result = base.attrs.get('href', '').strip()
            if result:
                return result
        # Parse the url to separate out the path
        parsed = urlparse(self.url)._asdict()
        # Remove any part of the path after the last '/'
        parsed['path'] = '/'.join(parsed['path'].split('/')[:-1]) + '/'
        # Reconstruct the url with the modified path
        parsed = (v for v in parsed.values())
        url = urlunparse(parsed)
        return url
class Element(BaseParser):
    """An element of HTML.
    :param element: The element from which to base the parsing upon.
    :param url: The URL from which the HTML originated, used for ``absolute_links``.
    :param default_encoding: Which encoding to default to.
    """
    # Fixed: 'tag' and 'lineno' are assigned in __init__ but were missing
    # from __slots__. That only worked because BaseParser defines no
    # __slots__ (so instances still carry a __dict__); declare them so the
    # slot list actually matches the instance attributes.
    __slots__ = [
        'element', 'url', 'skip_anchors', 'default_encoding', '_encoding',
        '_html', '_lxml', '_pq', '_attrs', 'session', 'tag', 'lineno'
    ]
    def __init__(self, *, element, url: _URL, default_encoding: _DefaultEncoding = None) -> None:
        super(Element, self).__init__(element=element, url=url, default_encoding=default_encoding)
        self.element = element
        self.tag = element.tag
        # Source line number reported by lxml, useful for debugging.
        self.lineno = element.sourceline
        self._attrs = None
    def __repr__(self) -> str:
        attrs = ['{}={}'.format(attr, repr(self.attrs[attr])) for attr in self.attrs]
        return "<Element {} {}>".format(repr(self.element.tag), ' '.join(attrs))
    @property
    def attrs(self) -> _Attrs:
        """Returns a dictionary of the attributes of the :class:`Element <Element>`
        (`learn more <https://www.w3schools.com/tags/ref_attributes.asp>`_).
        """
        if self._attrs is None:
            self._attrs = {k: v for k, v in self.element.items()}
            # Split class and rel up, as there are usually many of them:
            for attr in ['class', 'rel']:
                if attr in self._attrs:
                    self._attrs[attr] = tuple(self._attrs[attr].split())
        return self._attrs
class HTML(BaseParser):
"""An HTML document, ready for parsing.
:param url: The URL from which the HTML originated, used for ``absolute_links``.
:param html: HTML from which to base the parsing upon (optional).
:param default_encoding: Which encoding to default to.
"""
def __init__(self, *, session: Union['HTMLSession', 'AsyncHTMLSession'] = None, url: str = DEFAULT_URL, html: _HTML, default_encoding: str = DEFAULT_ENCODING, async_: bool = False) -> None:
# Convert incoming unicode HTML into bytes.
if isinstance(html, str):
html = html.encode(DEFAULT_ENCODING)
super(HTML, self).__init__(
# Convert unicode HTML to bytes.
element=PyQuery(html)('html') or PyQuery(f'<html>{html}</html>')('html'),
html=html,
url=url,
default_encoding=default_encoding
)
self.session = session or async_ and AsyncHTMLSession() or HTMLSession()
self.page = None
self.next_symbol = DEFAULT_NEXT_SYMBOL
def __repr__(self) -> str:
return f"<HTML url={self.url!r}>"
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
"""Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
"""
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url
def __iter__(self):
next = self
while True:
yield next
try:
next = next.next(fetch=True, next_symbol=self.next_symbol).html
except AttributeError:
break
def __next__(self):
return self.next(fetch=True, next_symbol=self.next_symbol).html
def __aiter__(self):
return self
async def __anext__(self):
while True:
url = self.next(fetch=False, next_symbol=self.next_symbol)
if not url:
break
response = await self.session.get(url)
return response.html
def add_next_symbol(self, next_symbol):
self.next_symbol.append(next_symbol)
    async def _async_render(self, *, url: str, script: str = None, scrolldown, sleep: int, wait: float, reload, content: Optional[str], timeout: Union[float, int], keep_page: bool):
        """ Handle page creation and js rendering. Internal use for render/arender methods.

        Returns a ``(content, script_result, page)`` tuple on success, or
        ``None`` on timeout -- callers unpack the tuple and rely on the
        resulting ``TypeError`` to detect a failed attempt.
        """
        try:
            page = await self.browser.newPage()
            # Wait before rendering the page, to prevent timeouts.
            await asyncio.sleep(wait)
            # Load the given page (GET request, obviously.)
            if reload:
                await page.goto(url, options={'timeout': int(timeout * 1000)})
            else:
                # Render from the in-memory markup via a data: URL.
                await page.goto(f'data:text/html,{self.html}', options={'timeout': int(timeout * 1000)})
            result = None
            if script:
                result = await page.evaluate(script)
            if scrolldown:
                for _ in range(scrolldown):
                    await page._keyboard.down('PageDown')
                    await asyncio.sleep(sleep)
            else:
                await asyncio.sleep(sleep)
            if scrolldown:
                await page._keyboard.up('PageDown')
            # Return the content of the page, JavaScript evaluated.
            content = await page.content()
            if not keep_page:
                await page.close()
                page = None
            return content, result, page
        except TimeoutError:
            # NOTE(review): assumes ``page`` was created before the timeout;
            # if ``newPage()`` itself raised, this hits an unbound name.
            await page.close()
            page = None
            return None
    def render(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
        """Reloads the response in Chromium, and replaces HTML content
        with an updated version, with JavaScript executed.
        :param retries: The number of times to retry loading the page in Chromium.
        :param script: JavaScript to execute upon page load (optional).
        :param wait: The number of seconds to wait before loading the page, preventing timeouts (optional).
        :param scrolldown: Integer, if provided, of how many times to page down.
        :param sleep: Integer, if provided, of how many long to sleep after initial render.
        :param reload: If ``False``, content will not be loaded from the browser, but will be provided from memory.
        :param keep_page: If ``True`` will allow you to interact with the browser page through ``r.html.page``.
        If ``scrolldown`` is specified, the page will scrolldown the specified
        number of times, after sleeping the specified amount of time
        (e.g. ``scrolldown=10, sleep=1``).
        If just ``sleep`` is provided, the rendering will wait *n* seconds, before
        returning.
        If ``script`` is specified, it will execute the provided JavaScript at
        runtime. Example:
        .. code-block:: python
            script = \"\"\"
                () => {
                    return {
                        width: document.documentElement.clientWidth,
                        height: document.documentElement.clientHeight,
                        deviceScaleFactor: window.devicePixelRatio,
                    }
                }
            \"\"\"
        Returns the return value of the executed ``script``, if any is provided:
        .. code-block:: python
            >>> r.html.render(script=script)
            {'width': 800, 'height': 600, 'deviceScaleFactor': 1}
        Warning: the first time you run this method, it will download
        Chromium into your home directory (``~/.pyppeteer``).
        Raises :class:`MaxRetries` if the page could not be rendered after
        ``retries`` attempts.
        """
        self.browser = self.session.browser  # Automatically create a event loop and browser
        content = None
        # Automatically set Reload to False, if example URL is being used.
        if self.url == DEFAULT_URL:
            reload = False
        for i in range(retries):
            if not content:
                try:
                    content, result, page = self.session.loop.run_until_complete(self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page))
                except TypeError:
                    # _async_render returned None (timeout) -- try again.
                    pass
            else:
                break
        if not content:
            raise MaxRetries("Unable to render the page. Try increasing timeout")
        # Replace our own state with that of the freshly rendered document.
        html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
        self.__dict__.update(html.__dict__)
        self.page = page
        return result
    async def arender(self, retries: int = 8, script: str = None, wait: float = 0.2, scrolldown=False, sleep: int = 0, reload: bool = True, timeout: Union[float, int] = 8.0, keep_page: bool = False):
        """ Async version of render. Takes same parameters.

        Raises :class:`MaxRetries` if the page could not be rendered after
        ``retries`` attempts.
        """
        self.browser = await self.session.browser
        content = None
        # Automatically set Reload to False, if example URL is being used.
        if self.url == DEFAULT_URL:
            reload = False
        for _ in range(retries):
            if not content:
                try:
                    content, result, page = await self._async_render(url=self.url, script=script, sleep=sleep, wait=wait, content=self.html, reload=reload, scrolldown=scrolldown, timeout=timeout, keep_page=keep_page)
                except TypeError:
                    # _async_render returned None (timeout) -- try again.
                    pass
            else:
                break
        if not content:
            raise MaxRetries("Unable to render the page. Try increasing timeout")
        # Replace our own state with that of the freshly rendered document.
        html = HTML(url=self.url, html=content.encode(DEFAULT_ENCODING), default_encoding=DEFAULT_ENCODING)
        self.__dict__.update(html.__dict__)
        self.page = page
        return result
class HTMLResponse(requests.Response):
    """An HTML-enabled :class:`requests.Response <requests.Response>` object.
    Effectively the same, but with an intelligent ``.html`` property added.
    """

    def __init__(self, session: Union['HTMLSession', 'AsyncHTMLSession']) -> None:
        super(HTMLResponse, self).__init__()
        self._html = None  # type: HTML
        self.session = session

    @property
    def html(self) -> HTML:
        """Lazily parse the response body into an :class:`HTML <HTML>` object."""
        if not self._html:
            self._html = HTML(session=self.session, url=self.url, html=self.content, default_encoding=self.encoding)
        return self._html

    @classmethod
    def _from_response(cls, response, session: Union['HTMLSession', 'AsyncHTMLSession']):
        """Clone an ordinary :class:`requests.Response` into an HTMLResponse."""
        instance = cls(session=session)
        instance.__dict__.update(response.__dict__)
        return instance
def user_agent(style=None) -> _UserAgent:
    """Returns an apparently legit user-agent, if not requested one of a specific
    style. Defaults to a Chrome-style User-Agent.
    """
    global useragent
    if style:
        # Build the UserAgent database lazily, only when a style is requested.
        if not useragent:
            useragent = UserAgent()
        return useragent[style]
    return DEFAULT_USER_AGENT
def _get_first_or_list(l, first=False):
if first:
try:
return l[0]
except IndexError:
return None
else:
return l
class BaseSession(requests.Session):
    """ A consumable session, for cookie persistence and connection pooling,
    amongst other things.
    """

    def __init__(self, mock_browser: bool = True, verify: bool = True,
                 browser_args: list = None):
        """
        :param mock_browser: If ``True``, send a browser-like User-Agent header.
        :param verify: TLS certificate verification, passed through to requests.
        :param browser_args: Extra Chromium command-line flags; defaults to
            ``['--no-sandbox']``.
        """
        super().__init__()
        # Mock a web browser's user agent.
        if mock_browser:
            self.headers['User-Agent'] = user_agent()
        self.hooks['response'].append(self.response_hook)
        self.verify = verify
        # Build a fresh list per instance: the previous mutable default
        # argument (['--no-sandbox']) was shared by every session object.
        self.__browser_args = ['--no-sandbox'] if browser_args is None else browser_args

    def response_hook(self, response, **kwargs) -> HTMLResponse:
        """ Change response encoding and replace it by a HTMLResponse. """
        if not response.encoding:
            response.encoding = DEFAULT_ENCODING
        return HTMLResponse._from_response(response, self)

    @property
    async def browser(self):
        # Lazily launch (and cache) one headless Chromium per session.
        if not hasattr(self, "_browser"):
            self._browser = await pyppeteer.launch(ignoreHTTPSErrors=not self.verify, headless=True, args=self.__browser_args)
        return self._browser
class HTMLSession(BaseSession):
    """Synchronous session that drives the browser via a private event loop."""

    def __init__(self, **kwargs):
        super(HTMLSession, self).__init__(**kwargs)

    @property
    def browser(self):
        """Launch (once) and return the headless browser, synchronously."""
        try:
            return self._browser
        except AttributeError:
            pass
        self.loop = asyncio.get_event_loop()
        if self.loop.is_running():
            raise RuntimeError("Cannot use HTMLSession within an existing event loop. Use AsyncHTMLSession instead.")
        self._browser = self.loop.run_until_complete(super().browser)
        return self._browser

    def close(self):
        """ If a browser was created close it first. """
        browser = getattr(self, "_browser", None)
        if browser is not None:
            self.loop.run_until_complete(browser.close())
        super().close()
class AsyncHTMLSession(BaseSession):
    """ An async consumable session. """

    def __init__(self, loop=None, workers=None,
                 mock_browser: bool = True, *args, **kwargs):
        """ Set or create an event loop and a thread pool.
        :param loop: Asyncio loop to use.
        :param workers: Amount of threads to use for executing async calls.
            If not pass it will default to the number of processors on the
            machine, multiplied by 5. """
        # Forward mock_browser to BaseSession -- previously it was accepted
        # here but silently discarded, so AsyncHTMLSession(mock_browser=False)
        # had no effect.
        super().__init__(*args, mock_browser=mock_browser, **kwargs)
        self.loop = loop or asyncio.get_event_loop()
        self.thread_pool = ThreadPoolExecutor(max_workers=workers)

    def request(self, *args, **kwargs):
        """ Partial original request func and run it in a thread. """
        func = partial(super().request, *args, **kwargs)
        return self.loop.run_in_executor(self.thread_pool, func)

    async def close(self):
        """ If a browser was created close it first. """
        if hasattr(self, "_browser"):
            await self._browser.close()
        super().close()

    def run(self, *coros):
        """ Pass in all the coroutines you want to run, it will wrap each one
        in a task, run it and wait for the result. Return a list with all
        results, this is returned in the same order coros are passed in. """
        tasks = [
            asyncio.ensure_future(coro()) for coro in coros
        ]
        self.loop.run_until_complete(asyncio.wait(tasks))
        # Collect results from ``tasks`` (input order). The previous code
        # iterated the *done* set returned by asyncio.wait, which is
        # unordered and therefore broke the documented ordering guarantee.
        return [t.result() for t in tasks]
import base64, hashlib, hmac, time
import email.utils
import requests
from urllib.parse import urlparse
from requests.exceptions import RequestException
class RequestsHttpMessageSignaturesException(RequestException):
    """An error occurred while constructing the HTTP Signature for your request."""
    # Subclasses requests' RequestException so callers can catch either type.
class Crypto:
    """Signing/verification helper for the supported HTTP-signature algorithms.

    For anything other than ``hmac-sha256`` the ``cryptography`` package is
    imported lazily in ``__init__``, so HMAC-only users don't need it installed.
    """

    def __init__(self, algorithm):
        if algorithm != "hmac-sha256":
            from cryptography.hazmat.primitives.serialization import (
                load_pem_private_key,
                load_pem_public_key,
            )
            from cryptography.hazmat.backends import default_backend
            from cryptography.hazmat.primitives.asymmetric import rsa, ec
            from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
            from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA512
        # Stash ``algorithm`` (and, for asymmetric algorithms, the lazily
        # imported names) as instance attributes.
        self.__dict__.update(locals())

    def _hasher(self):
        """Return the hash instance matching the algorithm's suffix."""
        if self.algorithm.endswith("sha512"):
            return self.SHA512()
        if self.algorithm.endswith("sha1"):
            return self.SHA1()
        return self.SHA256()

    def sign(self, string_to_sign, key, passphrase=None):
        """Sign *string_to_sign* (bytes) with *key*; return raw signature bytes."""
        if self.algorithm == "hmac-sha256":
            return hmac.new(key, string_to_sign, digestmod=hashlib.sha256).digest()
        key = self.load_pem_private_key(
            key, password=passphrase, backend=self.default_backend()
        )
        if self.algorithm == "ecdsa-sha256":
            return key.sign(
                signature_algorithm=self.ec.ECDSA(algorithm=self.SHA256()),
                data=string_to_sign,
            )
        # rsa-sha1 / rsa-sha256 / rsa-sha512, all with PKCS#1 v1.5 padding.
        return key.sign(
            padding=self.PKCS1v15(), algorithm=self._hasher(), data=string_to_sign
        )

    def verify(self, signature, string_to_sign, key):
        """Verify *signature* over *string_to_sign*; raises on mismatch."""
        if self.algorithm == "hmac-sha256":
            expected = hmac.new(key, string_to_sign, digestmod=hashlib.sha256).digest()
            # Constant-time comparison to avoid a timing side channel.
            assert hmac.compare_digest(signature, expected)
        else:
            key = self.load_pem_public_key(key, backend=self.default_backend())
            # BUG FIX: rsa-sha512 previously fell through to SHA256 here, so
            # signatures produced by sign() could never verify.
            hasher = self._hasher()
            if self.algorithm == "ecdsa-sha256":
                key.verify(signature, string_to_sign, self.ec.ECDSA(hasher))
            else:
                key.verify(signature, string_to_sign, self.PKCS1v15(), hasher)
class HTTPSignatureAuth(requests.auth.AuthBase):
    """Requests auth plugin implementing draft-cavage HTTP message signatures.

    Signs outgoing requests by adding an ``Authorization: Signature ...``
    header; the ``verify`` classmethod checks signed messages.
    """
    # Hash used for the Digest header (always SHA-256).
    hasher_constructor = hashlib.sha256
    known_algorithms = {
        "rsa-sha1",
        "rsa-sha256",
        "rsa-sha512",
        "hmac-sha256",
        "ecdsa-sha256",
    }

    def __init__(
        self,
        key,
        key_id,
        algorithm="hmac-sha256",
        headers=None,
        passphrase=None,
        expires_in=None,
    ):
        """
        :param key: Key material (shared-secret bytes for HMAC, PEM otherwise).
        :param key_id: Identifier sent in the signature's ``keyId`` field.
        :param algorithm: One of ``known_algorithms``.
        :param headers: Header names to cover in the signature (default: ["date"]).
        :param typing.Union[bytes, string] passphrase: The passphrase for an encrypted RSA private key
        :param datetime.timedelta expires_in: The time after which this signature should expire
        """
        assert algorithm in self.known_algorithms
        self.key = key
        self.key_id = key_id
        self.algorithm = algorithm
        self.headers = [h.lower() for h in headers] if headers is not None else ["date"]
        # Normalize the passphrase to bytes (or leave it as None).
        self.passphrase = (
            passphrase
            if passphrase is None or isinstance(passphrase, bytes)
            else passphrase.encode()
        )
        self.expires_in = expires_in

    def add_date(self, request, timestamp):
        """Set the Date header from *timestamp* unless the caller already did."""
        if "Date" not in request.headers:
            request.headers["Date"] = email.utils.formatdate(timestamp, usegmt=True)

    def add_digest(self, request):
        """Add a SHA-256 Digest header for the request body, if one is needed."""
        if request.body is None and "digest" in self.headers:
            raise RequestsHttpMessageSignaturesException(
                "Could not compute digest header for request without a body"
            )
        if request.body is not None and "Digest" not in request.headers:
            # Make sure the digest itself is part of the signed headers.
            if "digest" not in self.headers:
                self.headers.append("digest")
            digest = self.hasher_constructor(request.body).digest()
            request.headers["Digest"] = "SHA-256=" + base64.b64encode(digest).decode()

    @classmethod
    def get_string_to_sign(cls, request, headers, created_timestamp, expires_timestamp):
        """Build the canonical string-to-sign from the covered header list."""
        sts = []
        for header in headers:
            if header == "(request-target)":
                # e.g. "(request-target): get /path?query"
                path_url = requests.models.RequestEncodingMixin.path_url.fget(request)
                sts.append("{}: {} {}".format(header, request.method.lower(), path_url))
            elif header == "(created)" and created_timestamp:
                sts.append("{}: {}".format(header, created_timestamp))
            elif header == "(expires)":
                assert (
                    expires_timestamp is not None
                ), 'You should provide the "expires_in" argument when using the (expires) header'
                sts.append("{}: {}".format(header, int(expires_timestamp)))
            else:
                if header.lower() == "host":
                    # Reconstruct host (plus non-default port) from the URL
                    # when no explicit Host header was set.
                    url = urlparse(request.url)
                    value = request.headers.get("host", url.hostname)
                    if (
                        url.scheme == "http"
                        and url.port not in [None, 80]
                        or url.scheme == "https"
                        and url.port not in [443, None]
                    ):
                        value = "{}:{}".format(value, url.port)
                else:
                    value = request.headers[header]
                sts.append("{k}: {v}".format(k=header.lower(), v=value))
        return "\n".join(sts).encode()

    def create_signature_string(self, request):
        """Sign *request* and return the value for the signature header."""
        created_timestamp = int(time.time())
        expires_timestamp = None
        if self.expires_in is not None:
            expires_timestamp = created_timestamp + self.expires_in.total_seconds()
        self.add_date(request, created_timestamp)
        self.add_digest(request)
        raw_sig = Crypto(self.algorithm).sign(
            string_to_sign=self.get_string_to_sign(
                request, self.headers, created_timestamp, expires_timestamp
            ),
            key=self.key.encode() if isinstance(self.key, str) else self.key,
            passphrase=self.passphrase,
        )
        sig = base64.b64encode(raw_sig).decode()
        sig_struct = [
            ("keyId", self.key_id),
            ("algorithm", self.algorithm),
            ("headers", " ".join(self.headers)),
            ("signature", sig),
        ]
        # The created/expires parameters are only emitted for algorithms that
        # allow them; all currently known algorithms (rsa/hmac/ecdsa) do not.
        if not (
            self.algorithm.startswith("rsa")
            or self.algorithm.startswith("hmac")
            or self.algorithm.startswith("ecdsa")
        ):
            sig_struct.append(("created", int(created_timestamp)))
            if expires_timestamp is not None:
                sig_struct.append(("expires", int(expires_timestamp)))
        return ",".join('{}="{}"'.format(k, v) for k, v in sig_struct)

    def __call__(self, request):
        # requests auth hook: sign and attach the Authorization header.
        request.headers["Authorization"] = "Signature " + self.create_signature_string(
            request
        )
        return request

    @classmethod
    def get_sig_struct(cls, request, scheme="Authorization"):
        """Parse the signature header into a dict of its parameters."""
        sig_struct = request.headers[scheme]
        if scheme == "Authorization":
            # Strip the leading "Signature " scheme token.
            sig_struct = sig_struct.split(" ", 1)[1]
        return {
            i.split("=", 1)[0]: i.split("=", 1)[1].strip('"')
            for i in sig_struct.split(",")
        }

    @classmethod
    def verify(cls, request, key_resolver, scheme="Authorization"):
        """Verify a signed request; raises on any validation failure."""
        if scheme == "Authorization":
            assert "Authorization" in request.headers, "No Authorization header found"
            msg = (
                'Unexpected scheme found in Authorization header (expected "Signature")'
            )
            assert request.headers["Authorization"].startswith("Signature "), msg
        elif scheme == "Signature":
            assert "Signature" in request.headers, "No Signature header found"
        else:
            raise RequestsHttpMessageSignaturesException(
                'Unknown signature scheme "{}"'.format(scheme)
            )
        sig_struct = cls.get_sig_struct(request, scheme=scheme)
        for field in "keyId", "algorithm", "signature":
            assert (
                field in sig_struct
            ), 'Required signature parameter "{}" not found'.format(field)
        assert (
            sig_struct["algorithm"] in cls.known_algorithms
        ), "Unknown signature algorithm"
        created_timestamp = (
            int(sig_struct["created"]) if "created" in sig_struct else None
        )
        expires_timestamp = sig_struct.get("expires")
        if expires_timestamp is not None:
            expires_timestamp = int(expires_timestamp)
        headers = sig_struct.get("headers", "date").split(" ")
        sig = base64.b64decode(sig_struct["signature"])
        # Rebuild the canonical string from the headers the signer covered.
        sts = cls.get_string_to_sign(
            request, headers, created_timestamp, expires_timestamp=expires_timestamp
        )
        key = key_resolver(
            key_id=sig_struct["keyId"], algorithm=sig_struct["algorithm"]
        )
        Crypto(sig_struct["algorithm"]).verify(sig, sts, key)
        # Reject signatures whose (expires) time has passed.
        if expires_timestamp is not None:
            assert expires_timestamp > int(time.time())
class HTTPSignatureHeaderAuth(HTTPSignatureAuth):
    """
    https://tools.ietf.org/html/draft-cavage-http-signatures-08#section-4
    Using "Signature" header instead of "Authorization" header.
    """

    def __call__(self, request):
        # Place the signature in the dedicated "Signature" header.
        request.headers["Signature"] = self.create_signature_string(request)
        return request

    @classmethod
    def verify(cls, request, key_resolver):
        """Verify a request signed via the "Signature" header scheme."""
        # NOTE: trailing file-concatenation residue stripped from this line.
        return super().verify(request, key_resolver, scheme="Signature")
import datetime
import email.utils
import hashlib
import secrets
from typing import Union, Sequence, Type
import http_sfv
import requests
from requests.exceptions import RequestException
from http_message_signatures import (algorithms, HTTPSignatureComponentResolver, HTTPSignatureKeyResolver, # noqa: F401
HTTPMessageSigner, HTTPMessageVerifier, HTTPSignatureAlgorithm, InvalidSignature)
from http_message_signatures.structures import CaseInsensitiveDict, VerifyResult
class RequestsHttpSignatureException(RequestException):
    """An error occurred while constructing the HTTP Signature for your request."""
    # Subclasses requests' RequestException so callers can catch either type.
class SingleKeyResolver(HTTPSignatureKeyResolver):
    """Key resolver that always returns one fixed key for one fixed key id."""

    def __init__(self, key_id, key):
        self.key_id = key_id  # the only key id this resolver accepts
        self.key = key  # key material (PEM bytes, or shared secret for HMAC)

    def resolve_public_key(self, key_id):
        # Reject lookups for any other key id.
        assert key_id == self.key_id
        return self.key

    def resolve_private_key(self, key_id):
        # Same key serves for both signing and verification.
        assert key_id == self.key_id
        return self.key
class HTTPSignatureAuth(requests.auth.AuthBase):
    """
    A `Requests <https://github.com/requests/requests>`_ `authentication plugin
    <http://docs.python-requests.org/en/master/user/authentication/>`_ (``requests.auth.AuthBase`` subclass)
    implementing the `IETF HTTP Message Signatures draft RFC
    <https://datatracker.ietf.org/doc/draft-ietf-httpbis-message-signatures/>`_.
    :param signature_algorithm:
        One of ``requests_http_signature.algorithms.HMAC_SHA256``,
        ``requests_http_signature.algorithms.ECDSA_P256_SHA256``,
        ``requests_http_signature.algorithms.ED25519``,
        ``requests_http_signature.algorithms.RSA_PSS_SHA512``, or
        ``requests_http_signature.algorithms.RSA_V1_5_SHA256``.
    :param key:
        Key material that will be used to sign the request. In the case of HMAC, this should be the raw bytes of the
        shared secret; for all other algorithms, this should be the bytes of the PEM-encoded private key material.
    :param key_id: The key ID to use in the signature.
    :param key_resolver:
        Instead of specifying a fixed key, you can instead pass a key resolver, which should be an instance of a
        subclass of ``http_message_signatures.HTTPSignatureKeyResolver``. A key resolver should have two methods,
        ``get_private_key(key_id)`` (required only for signing) and ``get_public_key(key_id)`` (required only for
        verifying). Your implementation should ensure that the key id is recognized and return the corresponding
        key material as PEM bytes (or shared secret bytes for HMAC).
    :param covered_component_ids:
        A list of lowercased header names or derived component IDs (``@method``, ``@target-uri``, ``@authority``,
        ``@scheme``, ``@request-target``, ``@path``, ``@query``, ``@query-params``, ``@status``, or
        ``@request-response``, as specified in the standard) to sign. By default, ``@method``, ``@authority``,
        and ``@target-uri`` are covered, and the ``Authorization``, ``Content-Digest``, and ``Date`` header fields
        are always covered if present.
    :param label: The label to use to identify the signature.
    :param include_alg:
        By default, the signature parameters will include the ``alg`` parameter, using it to identify the signature
        algorithm. If you wish not to include this parameter, set this to ``False``.
    :param use_nonce:
        Set this to ``True`` to include a unique message-specific nonce in the signature parameters. The format of
        the nonce can be controlled by subclassing this class and overloading the ``get_nonce()`` method.
    :param expires_in:
        Use this to set the ``expires`` signature parameter to the time of signing plus the given timedelta.
    """
    component_resolver_class: type = HTTPSignatureComponentResolver
    """
    A subclass of ``http_message_signatures.HTTPSignatureComponentResolver`` can be used to override this value
    to customize the retrieval of header and derived component values if needed.
    """
    # Supported Content-Digest hash algorithms, keyed by their header token.
    _content_digest_hashers = {"sha-256": hashlib.sha256, "sha-512": hashlib.sha512}
    signing_content_digest_algorithm = "sha-256"
    "The hash algorithm to use to generate the Content-Digest header field (either ``sha-256`` or ``sha-512``)."
    # Header fields automatically added to the covered components when present.
    _auto_cover_header_fields = {"authorization", "content-digest", "date"}
    def __init__(self, *,
                 signature_algorithm: Type[HTTPSignatureAlgorithm],
                 key: bytes = None,
                 key_id: str,
                 key_resolver: HTTPSignatureKeyResolver = None,
                 covered_component_ids: Sequence[str] = ("@method", "@authority", "@target-uri"),
                 label: str = None,
                 include_alg: bool = True,
                 use_nonce: bool = False,
                 expires_in: datetime.timedelta = None):
        """See the class docstring for the meaning of each parameter."""
        # Exactly one of ``key`` / ``key_resolver`` must be supplied.
        if key_resolver is None and key is None:
            raise RequestsHttpSignatureException("Either key_resolver or key must be specified.")
        if key_resolver is not None and key is not None:
            raise RequestsHttpSignatureException("Either key_resolver or key must be specified, not both.")
        if key_resolver is None:
            # Wrap the fixed key so signing always goes through a resolver.
            key_resolver = SingleKeyResolver(key_id=key_id, key=key)
        self.key_id = key_id
        self.label = label
        self.include_alg = include_alg
        self.use_nonce = use_nonce
        self.covered_component_ids = covered_component_ids
        self.expires_in = expires_in
        self.signer = HTTPMessageSigner(signature_algorithm=signature_algorithm,
                                        key_resolver=key_resolver,
                                        component_resolver_class=self.component_resolver_class)
def add_date(self, request, timestamp):
if "Date" not in request.headers:
request.headers["Date"] = email.utils.formatdate(timestamp, usegmt=True)
    def add_digest(self, request):
        """Add a Content-Digest header for the request body, if one is needed."""
        if request.body is None and "content-digest" in self.covered_component_ids:
            raise RequestsHttpSignatureException("Could not compute digest header for request without a body")
        if request.body is not None:
            if "Content-Digest" not in request.headers:
                hasher = self._content_digest_hashers[self.signing_content_digest_algorithm]
                digest = hasher(request.body).digest()
                # Serialize as an RFC 8941 structured-field dictionary.
                digest_node = http_sfv.Dictionary({self.signing_content_digest_algorithm: digest})
                request.headers["Content-Digest"] = str(digest_node)
def get_nonce(self, request):
if self.use_nonce:
return secrets.token_urlsafe(16)
    def get_created(self, request):
        """Return the signing time, also ensuring a Date header is present."""
        created = datetime.datetime.now()
        self.add_date(request, timestamp=int(created.timestamp()))
        return created
def get_expires(self, request, created):
if self.expires_in:
return datetime.datetime.now() + self.expires_in
    def get_covered_component_ids(self, request):
        """Return the configured component ids plus auto-covered headers present."""
        covered_component_ids = CaseInsensitiveDict((k, None) for k in self.covered_component_ids)
        headers = CaseInsensitiveDict(request.headers)
        # Authorization / Content-Digest / Date are always covered when present.
        for header in self._auto_cover_header_fields:
            if header in headers:
                covered_component_ids.setdefault(header, None)
        return list(covered_component_ids)
    def __call__(self, request):
        """requests auth hook: sign *request* in place and return it."""
        self.add_digest(request)
        created = self.get_created(request)
        expires = self.get_expires(request, created=created)
        covered_component_ids = self.get_covered_component_ids(request)
        self.signer.sign(request,
                         key_id=self.key_id,
                         created=created,
                         expires=expires,
                         nonce=self.get_nonce(request),
                         label=self.label,
                         include_alg=self.include_alg,
                         covered_component_ids=covered_component_ids)
        return request
    @classmethod
    def get_body(cls, message):
        """Return the payload of a request (``.body``) or response (``.content``)."""
        if isinstance(message, requests.Response):
            return message.content
        return message.body
@classmethod
def verify(cls, message: Union[requests.PreparedRequest, requests.Response], *,
require_components: Sequence[str] = ("@method", "@authority", "@target-uri"),
signature_algorithm: Type[HTTPSignatureAlgorithm],
key_resolver: HTTPSignatureKeyResolver,
max_age: datetime.timedelta = datetime.timedelta(days=1)) -> VerifyResult:
"""
Verify an HTTP message signature.
.. admonition:: See what is signed
It is important to understand and follow the best practice rule of "See what is signed" when verifying HTTP
message signatures. The gist of this rule is: if your application neglects to verify that the information it
trusts is what was actually signed, the attacker can supply a valid signature but point you to malicious data
that wasn't signed by that signature. Failure to follow this rule can lead to vulnerability against signature
wrapping and substitution attacks.
You can ensure that the information signed is what you expect to be signed by only trusting the *VerifyResult*
tuple returned by ``verify()``.
:param message:
The HTTP response or request to verify. You can either pass a received response, or reconstruct an arbitrary
request using the `Requests API <https://docs.python-requests.org/en/latest/api/#requests.Request>`_::
request = requests.Request(...)
prepared_request = request.prepare()
HTTPSignatureAuth.verify(prepared_request, ...)
:param require_components:
A list of lowercased header names or derived component IDs (``@method``, ``@target-uri``, ``@authority``,
``@scheme``, ``@request-target``, ``@path``, ``@query``, ``@query-params``, ``@status``, or
``@request-response``, as specified in the standard) to require to be covered by the signature. If the
``content-digest`` header field is specified here (recommended for messages that have a body), it will be
verified by matching it against the digest hash computed on the body of the message (expected to be bytes).
If this parameter is not specified, ``verify()`` will set it to ``("@method", "@authority", "@target-uri")``
for messages without a body, and ``("@method", "@authority", "@target-uri", "content-digest")`` for messages
with a body.
:param signature_algorithm:
The algorithm expected to be used by the signature. Any signature not using the expected algorithm will
cause an ``InvalidSignature`` exception. Must be one of ``requests_http_signature.algorithms.HMAC_SHA256``,
``requests_http_signature.algorithms.ECDSA_P256_SHA256``,
``requests_http_signature.algorithms.ED25519``,
``requests_http_signature.algorithms.RSA_PSS_SHA512``, or
``requests_http_signature.algorithms.RSA_V1_5_SHA256``.
:param key_resolver:
A key resolver, which should be an instance of a subclass of
``http_message_signatures.HTTPSignatureKeyResolver``. A key resolver should have two methods,
``get_private_key(key_id)`` (required only for signing) and ``get_public_key(key_id)`` (required only for
verifying). Your implementation should ensure that the key id is recognized and return the corresponding
key material as PEM bytes (or shared secret bytes for HMAC).
:param max_age:
The maximum age of the signature, defined as the difference between the ``created`` parameter value and now.
:returns: *VerifyResult*, a namedtuple with the following attributes:
* ``label`` (str): The label for the signature
* ``algorithm``: (same as ``signature_algorithm`` above)
* ``covered_components``: A mapping of component names to their values, as covered by the signature
* ``parameters``: A mapping of signature parameters to their values, as covered by the signature, including
"alg", "created", "expires", "keyid", and "nonce". To protect against replay attacks, retrieve the "nonce"
parameter here and check that it has not been seen before.
* ``body``: The message body for messages that have a body and pass validation of the covered
content-digest; ``None`` otherwise.
:raises: ``InvalidSignature`` - raised whenever signature validation fails for any reason.
"""
body = cls.get_body(message)
if body is not None:
if "content-digest" not in require_components and '"content-digest"' not in require_components:
require_components = list(require_components) + ["content-digest"]
verifier = HTTPMessageVerifier(signature_algorithm=signature_algorithm,
key_resolver=key_resolver,
component_resolver_class=cls.component_resolver_class)
verify_results = verifier.verify(message, max_age=max_age)
if len(verify_results) != 1:
raise InvalidSignature("Multiple signatures are not supported.")
verify_result = verify_results[0]
for component_name in require_components:
component_key = component_name
if not component_key.startswith('"'):
component_key = str(http_sfv.List([http_sfv.Item(component_name)]))
if component_key not in verify_result.covered_components:
raise InvalidSignature(f"A required component, {component_key}, was not covered by the signature.")
if component_key == '"content-digest"':
if body is None:
raise InvalidSignature("Found a content-digest header in a message with no body")
digest = http_sfv.Dictionary()
digest.parse(verify_result.covered_components[component_key].encode())
if len(digest) < 1:
raise InvalidSignature("Found a content-digest header with no digests")
for k, v in digest.items():
if k not in cls._content_digest_hashers:
raise InvalidSignature(f'Unsupported content digest algorithm "{k}"')
raw_digest = v.value
hasher = cls._content_digest_hashers[k]
expect_digest = hasher(body).digest()
if raw_digest != expect_digest:
raise InvalidSignature("The content-digest header does not match the message body")
verify_result = verify_result._replace(body=body)
return verify_result | /requests_http_signature-0.7.1-py3-none-any.whl/requests_http_signature/__init__.py | 0.885489 | 0.278278 | __init__.py | pypi |
import time
import json
import logging
import jwt
import requests
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.backends import default_backend
# https://cloud.google.com/iap/docs/authentication-howto
# Module-level logger shared by this package.
log = logging.getLogger("requests_iap")
class IAPAuth(requests.auth.AuthBase):
    """Custom requests Auth class used to authenticate HTTP requests to OIDC-authenticated resources using a service account.
    The major use case is to use this flow to make requests to resources behind an Identity-Aware Proxy (https://cloud.google.com/iap).
    This works by generating a JWT with an additional `target_audience` claim set to the OAuth2 client id which
    is signed using the GCP service account credentials.
    This JWT is then exchanged for a Google-signed OIDC token for the client id specified in the JWT claims.
    Authenticated requests are made by setting the token in the `Authorization: Bearer` header.
    This token has roughly a 1-hour expiration and is renewed transparently by this authentication class.
    The renewal interval is 30 minutes (to keep requests working with the old token for roughly 30 more minutes in case Google API is down).
    This can be configured via the `jwt_soft_expiration` parameter.
    """

    # Class-level cache: the OIDC token is shared by every IAPAuth instance
    # in the process.
    google_iap_jwt = None

    def __init__(
        self,
        client_id,
        service_account_secret_dict,
        jwt_soft_expiration=1800,
        oauth_token_uri="https://www.googleapis.com/oauth2/v4/token",
        jwt_bearer_token_grant_type="urn:ietf:params:oauth:grant-type:jwt-bearer",
    ):
        """
        :param client_id: OAuth2 client id of the IAP-protected resource.
        :param service_account_secret_dict: Parsed service-account JSON key.
        :param jwt_soft_expiration: Seconds after which the cached token is
            renewed (must not exceed 3600, the token's hard expiration).
        """
        self.client_id = client_id
        self.service_account_secret_dict = service_account_secret_dict
        if jwt_soft_expiration > 3600:
            raise ValueError(
                "`jwt_soft_expiration` should NOT be larger than 3600 seconds (1 hour)!"
            )
        self.jwt_soft_expiration = jwt_soft_expiration
        # You shouldn't need to change those
        self.oauth_token_uri = oauth_token_uri
        self.jwt_bearer_token_grant_type = jwt_bearer_token_grant_type

    def __call__(self, r):
        """requests auth hook: ensure a fresh OIDC token and attach it."""
        if IAPAuth.google_iap_jwt is None or self.is_jwt_expired(
            IAPAuth.google_iap_jwt
        ):
            try:
                IAPAuth.google_iap_jwt = self.get_google_open_id_connect_token()
            except requests.exceptions.RequestException:
                log.exception("Google OAuth2 API returned an unexpected response!")
            except Exception:
                # Some token will be always better than none, so we will swallow and attempt to make the request anyway with the old token.
                log.exception(
                    "Something terribly unexpected happened during the OIDC token generation!"
                )
        if IAPAuth.google_iap_jwt is None:
            raise RuntimeError("OIDC token generation failed!")
        r.headers["Authorization"] = "Bearer {}".format(IAPAuth.google_iap_jwt)
        return r

    def get_jwt_assertion(self):
        """Build and sign the JWT assertion exchanged for an OIDC token."""
        message = {
            "kid": self.service_account_secret_dict["private_key_id"],
            "iss": self.service_account_secret_dict["client_email"],
            "sub": self.service_account_secret_dict["client_email"],
            "aud": self.oauth_token_uri,
            "iat": int(time.time()),
            "exp": int(time.time()) + 60 * 60,
            "target_audience": self.client_id,
        }
        return jwt.encode(
            message,
            load_pem_private_key(
                jwt.utils.force_bytes(self.service_account_secret_dict["private_key"]),
                password=None,
                backend=default_backend(),
            ),
            algorithm="RS256",
        )

    def is_jwt_expired(self, jwt_token):
        """Return True once *jwt_token* is older than the soft expiration."""
        # BUG FIX: previously this ignored its argument and always read the
        # class attribute; decode the token actually passed in.
        # NOTE(review): `verify=False` was removed in PyJWT 2.x, which needs
        # jwt.decode(..., options={"verify_signature": False}) instead --
        # confirm the pinned PyJWT version.
        claims = jwt.decode(jwt_token, verify=False)
        return (claims["iat"] + self.jwt_soft_expiration) < time.time()

    def get_google_open_id_connect_token(self):
        """Exchange the signed JWT assertion for a Google-signed OIDC token."""
        session = IAPAuth.retry_session()
        r = session.post(
            self.oauth_token_uri,
            timeout=3,
            data={
                "assertion": self.get_jwt_assertion(),
                "grant_type": self.jwt_bearer_token_grant_type,
            },
        )
        r.raise_for_status()
        log.debug("Successfully requested id_token from Google API.")
        return r.json()["id_token"]

    @staticmethod
    def retry_session():
        """Build a requests Session that retries transient failures."""
        session = requests.Session()
        retries = 3
        # NOTE(review): `method_whitelist` was renamed to `allowed_methods`
        # in urllib3 1.26 and removed in 2.x -- confirm the pinned urllib3.
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            backoff_factor=0.3,
            method_whitelist=False,
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        return session
# requests-ip-rotator
A Python library to utilize AWS API Gateway's large IP pool as a proxy to generate pseudo-infinite IPs for web scraping and brute forcing.
This library will allow the user to bypass IP-based rate-limits for sites and services.
X-Forwarded-For headers are automatically randomised and applied unless given. This is because otherwise, AWS will send the client's true IP address in this header.
AWS' ApiGateway sends its requests from any available IP - and since the AWS infrastructure is so large, it is almost guaranteed to be different each time. By using ApiGateway as a proxy, we can take advantage of this to send requests from different IPs each time. Please note that these requests can be easily identified and blocked, since they are sent with unique AWS headers (i.e. "X-Amzn-Trace-Id").
---
## Installation
This package is on pypi so you can install via any of the following:
* `pip3 install requests-ip-rotator`
* `python3 -m pip install requests-ip-rotator`
## Simple Usage
```py
import requests
from requests_ip_rotator import ApiGateway
# Create gateway object and initialise in AWS
gateway = ApiGateway("https://site.com")
gateway.start()
# Assign gateway to session
session = requests.Session()
session.mount("https://site.com", gateway)
# Send request (IP will be randomised)
response = session.get("https://site.com/index.php", params={"theme": "light"})
print(response.status_code)
# Delete gateways
gateway.shutdown()
```
### Alternate Usage (auto-start and shutdown)
```py
import requests
from requests_ip_rotator import ApiGateway
with ApiGateway("https://site.com") as g:
session = requests.Session()
session.mount("https://site.com", g)
response = session.get("https://site.com/index.php")
print(response.status_code)
```
Please remember that if gateways are not shutdown via the `shutdown()` method when using method #1, you may be charged in future.
## Costs
API Gateway is free for the first million requests per region, which means that for most use cases this should be completely free.
At the time of writing, AWS charges ~$3 per million requests after the free tier has been exceeded.
If your requests involve data transfer, AWS charges a data transfer fee of $0.09 per GB.
## Documentation
### AWS Authentication
It is recommended to setup authentication via environment variables. With [awscli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html), you can run `aws configure` to do this, or alternatively, you can simply set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables yourself.
You can find your access key ID and secret by following the [official AWS tutorial](https://docs.aws.amazon.com/powershell/latest/userguide/pstools-appendix-sign-up.html).
### Creating ApiGateway object
The ApiGateway class can be created with the following optional parameters:
| Name | Description | Required | Default
| ----------- | ----------- | ----------- | -----------
| site | The site (without path) requests will be sent to. | True |
| regions | An array of AWS regions to setup gateways in. | False | ip_rotator.DEFAULT_REGIONS
| access_key_id | AWS Access Key ID (will override env variables). | False | *Relies on env variables.*
| access_key_secret | AWS Access Key Secret (will override env variables). | False | *Relies on env variables.*
| verbose | Include status and error messages. | False | True
```python
from ip_rotator import ApiGateway, EXTRA_REGIONS, ALL_REGIONS
# Gateway to outbound HTTP IP and port for only two regions
gateway_1 = ApiGateway("http://1.1.1.1:8080", regions=["eu-west-1", "eu-west-2"])
# Gateway to HTTPS google for the extra regions pack, with specified access key pair
gateway_2 = ApiGateway("https://www.google.com", regions=EXTRA_REGIONS, access_key_id="ID", access_key_secret="SECRET")
```
### Starting API gateway
An ApiGateway object must then be started using the `start` method.
**By default, if an ApiGateway already exists for the site, it will use the existing endpoint instead of creating a new one.**
This does not require any parameters, but accepts the following:
| Name | Description | Required | Default
| ----------- | ----------- | ----------- | -----------
| endpoints | Array of pre-existing endpoints (i.e. from previous session). | False |
| force | Create a new set of endpoints, even if some already exist. | False | False
| require_manual_shutdown | Bool specifying whether API gateways should persist after `shutdown()` calls | False | False
```python
# Starts new ApiGateway instances for site, or locates existing endpoints if they already exist.
gateway_1.start()
# Starts new ApiGateway instances even if some already exist.
gateway_2.start(force=True)
```
### Sending requests
Requests are sent by attaching the ApiGateway object to a requests Session object.
The site given in `mount` must match the site passed in the `ApiGateway` constructor.
```python
import requests
# Posts a request to the site created in gateway_1. Will be sent from a random IP.
session_1 = requests.Session()
session_1.mount("http://1.1.1.1:8080", gateway_1)
session_1.post("http://1.1.1.1:8080/update.php", headers={"Hello": "World"})
# Send 127.0.0.1 as X-Forwarded-For header in outbound request (otherwise X-Forwarded-For is randomised).
session_1.post("http://1.1.1.1:8080/update.php", headers={"X-Forwarded-For": "127.0.0.1"})
# Execute Google search query from random IP
session_2 = requests.Session()
session_2.mount("https://www.google.com", gateway_2)
session_2.get("https://www.google.com/search?q=test")
```
### Closing ApiGateway Resources
It's important to shutdown the ApiGateway resources once you have finished with them, to prevent dangling public endpoints that can cause excess charges to your account.
This is done through the `shutdown` method of the ApiGateway object. It will close all resources for the regions specified in the ApiGateway object constructor.
```python
# This will shutdown all gateway proxies for "http://1.1.1.1:8080" in "eu-west-1" & "eu-west-2"
gateway_1.shutdown()
# This will shutdown all gateway proxies for "https://www.google.com" for all regions in ip_rotator.EXTRA_REGIONS
gateway_2.shutdown()
```
Alternatively, you can selectively shutdown specific endpoints, if needed. To do this, simply pass in an array of endpoints to the shutdown() method, i.e:
```python
# This will force start a new gateway (i.e. create new endpoints even if some exist on the region already), and then delete the first 3 of them only.
gateway_3 = ApiGateway("http://1.1.1.1:8082", regions=ALL_REGIONS)
endpoints = gateway_3.start(force=True)
gateway_3.shutdown(endpoints[:3])
```
**Please bear in mind that any gateways started with the `require_manual_shutdown` parameter set to `True` will not be deleted via the `shutdown` method, and must be deleted
manually through either the AWS CLI or Website.**
## Credit
The core gateway creation and organisation code was adapted from RhinoSecurityLabs' [IPRotate Burp Extension](https://github.com/RhinoSecurityLabs/IPRotate_Burp_Extension/).
The X-My-X-Forwarded-For header forwarding concept was originally conceptualised by [ustayready](https://twitter.com/ustayready) in his [fireprox](https://github.com/ustayready/fireprox) proxy.
| /requests-ip-rotator-1.0.14.tar.gz/requests-ip-rotator-1.0.14/README.md | 0.435902 | 0.795241 | README.md | pypi |
from __future__ import unicode_literals
import requests
import hashlib
from requests.auth import AuthBase
import time
import jwt
__all__ = [
'JWTAuth',
'payload_method',
'payload_path',
'payload_body',
]
def payload_method(req):
    """
    A generator that will include the request method in the JWT payload.

    >>> auth = JWTAuth('secret')
    >>> auth.add_field('method', payload_method)
    """
    # Converted from an assigned lambda (PEP 8 E731) so the string below is
    # an actual docstring instead of an unattached literal.
    return req.method


def payload_path(req):
    """
    A generator that will include the request's path ('/blah/index.html') in
    the JWT payload.

    >>> auth = JWTAuth('secret')
    >>> auth.add_field('path', payload_path)
    """
    return req.path_url
def payload_body(req):
    """
    A generator that will include the sha256 signature of the request's body
    in the JWT payload. This is only done if the request could have a body:
    if the method is POST or PUT. Returns None for other methods.

    >>> auth = JWTAuth('secret')
    >>> auth.add_field('body', payload_body)
    """
    # Bug fix: check the method *before* touching the body. The body was
    # previously encoded unconditionally, which raised AttributeError on
    # body-less requests (e.g. GET) whose body is None.
    if req.method not in ('POST', 'PUT'):
        return None
    body = req.body or b''
    to_hash = body if isinstance(body, bytes) else body.encode('utf-8')
    return {
        'hash': hashlib.sha256(to_hash).hexdigest(),
        'alg': 'sha256',
    }
class JWTAuth(AuthBase):
    """
    JSON Web Token authentication for :mod:`requests`.

    Basic usage::

        auth = JWTAuth(secret)
        # Optionally add fields to the signed payload (see below)
        resp = requests.get('http://example.com/', auth=auth)

    Payload fields are registered with :meth:`add_field` and :meth:`expire`.

    This is a 'Custom Authentication' mechanism for Kenneth Reitz's Requests
    library; see the `example in the docs
    <http://docs.python-requests.org/en/latest/user/advanced/#custom-authentication>`_
    for some context.

    For more on JSON Web Tokens, see `the standard
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html>`_.

    See the documentation of :mod:`PyJWT` for the list of available
    algorithms.
    """

    def __init__(self, secret, alg='HS256', header_format='JWT token="%s"'):
        self.secret = secret
        self.alg = alg
        self._header_format = header_format
        self._generators = {}

    def add_field(self, name, generator):
        """
        Register a payload field.

        - name: the field name (a string).
        - generator: either a static value, or a callable invoked with the
          `PreparedRequest` each time a request is signed. See `this page`_
          for the available request properties.

        .. _`this page`: http://docs.python-requests.org/en/latest/api/#requests.PreparedRequest

        For instance, a field that signs the requested path::

            auth.add_field('path', lambda req: req.path_url)

        If `generator` is not callable, it is included directly in the JWT
        payload; it may be a string or any JSON-serializable object.

        Ready-made generators: :func:`payload_method`, :func:`payload_path`
        and :func:`payload_body`. :meth:`expire` also wraps this method.
        """
        self._generators[name] = generator

    def expire(self, secs):
        """
        Register the standard 'exp' field, used to prevent replay attacks.

        At request time the field is set to now + `secs` seconds. This
        provides no protection unless the server actually enforces it.
        """
        self.add_field('exp', lambda req: int(time.time() + secs))

    def set_header_format(self, new_format):
        """
        Change the ``Authorization:`` header template; must be a format
        string containing exactly one ``%s``.
        """
        self._header_format = new_format

    def _generate(self, request):
        """
        Resolve every registered generator against `request`, skipping
        falsy results, and return the payload dict.
        """
        payload = {}
        for name, gen in self._generators.items():
            field_value = gen(request) if callable(gen) else gen
            if field_value:
                payload[name] = field_value
        return payload

    def __call__(self, request):
        """
        Called by requests when a request is made; `request` is a
        PreparedRequest. Signs the generated payload and attaches the
        `Authorization` header.
        """
        signed = jwt.encode(self._generate(request), self.secret, self.alg)
        # jwt.encode() -> str in pyJWT>=2, vs bytes in pyJWT<2
        if isinstance(signed, bytes):
            signed = signed.decode('ascii')
        request.headers['Authorization'] = self._header_format % signed
        return request
from time import time
from typing import List
class LimitDto:
    """Plain data holder describing one rate-limit window parsed from headers."""

    def __init__(self, max_requests, requests_made, reset_timestamp):
        # Request ceiling for the window.
        self.max_requests = max_requests
        # Requests already consumed within the window.
        self.requests_made = requests_made
        # Absolute epoch time at which the window resets.
        self.reset_timestamp = reset_timestamp

    def __repr__(self):
        # vars(self) is the instance __dict__, matching the original output.
        return str(vars(self))
def define_limiter_headers(max_requests, requests_made, seconds_until_reset):
    """Return a zero-argument factory that produces a LimitDto snapshot.

    The returned callable converts the relative `seconds_until_reset` into an
    absolute epoch timestamp at the moment it is invoked.
    """
    def define_limiter_headers_wrapper():
        reset_at = int(time()) + seconds_until_reset
        return LimitDto(max_requests, requests_made, reset_at)
    return define_limiter_headers_wrapper
class RateLimit:
    """Tracks request quota windows for a named API rate limit.

    Three parallel lists index the limit windows: ``max_requests[i]``,
    ``requests_made[i]`` and ``reset_timestamp[i]`` together describe
    window *i*.

    :param name: human-readable identifier of this limit.
    :param formatter: callable mapping response headers to a list of
        LimitDto-like objects (``max_requests``/``requests_made``/
        ``reset_timestamp`` attributes).
    :param defaults: 3-tuple of iterables: (max requests per window,
        requests already made per window, seconds until each window resets).
    """

    def __init__(self, name, formatter, defaults):
        self.name = name
        self.max_requests: List = list(defaults[0])
        self.requests_made: List = list(defaults[1])
        # Convert relative "seconds until reset" defaults to absolute epochs.
        self.reset_timestamp: List = [time() + default for default in defaults[2]]
        self._formatter = formatter

    def __repr__(self):
        return str(self.__dict__)

    def can_request(self) -> bool:
        """Return True when no tracked window has exhausted its quota."""
        if not (self.max_requests and self.requests_made and self.reset_timestamp):
            # No tracking data yet: optimistically allow the request.
            return True
        for max_requests, requests_made in zip(self.max_requests, self.requests_made):
            if requests_made >= max_requests:
                return False
        return True

    def is_initialized(self):
        """Return True once all three tracking lists are non-empty."""
        return bool(self.max_requests and self.requests_made and self.reset_timestamp)

    def set_formatter(self, callback):
        """Replace the header-parsing callback."""
        self._formatter = callback

    def register_request(self, request=None, headers: dict = None):
        """Record a completed request from its response headers.

        Exactly one of `request` (an object exposing ``.headers``) or
        `headers` (a plain dict) must be given.

        :raises Exception: when both or neither argument is provided.
        """
        # Bug fix: the original check only rejected the both-provided case
        # (with a garbled message) and crashed opaquely when neither was
        # given. Require exactly one argument.
        if (request is None) == (headers is None):
            raise Exception("Exactly one of `request` or `headers` must be provided")
        if request is not None:
            headers = request.headers
        limits = self._formatter(headers)  # list of LimitDto-like objects
        for idx, limit in enumerate(limits):
            if idx > len(self.requests_made) - 1:
                # First sighting of this window: grow the parallel lists.
                self.requests_made.append(limit.requests_made)
                self.max_requests.append(limit.max_requests)
                self.reset_timestamp.append(limit.reset_timestamp)
            self.requests_made[idx] = limit.requests_made
            self.max_requests[idx] = limit.max_requests
            if limit.requests_made == 1:
                # Window just (re)started: adopt its fresh reset time.
                self.reset_timestamp[idx] = limit.reset_timestamp

    @property
    def reset_in(self):
        """Seconds remaining until each window resets (may be negative)."""
        return [reset_time - int(time()) for reset_time in self.reset_timestamp]

    @property
    def time_until_new_request_is_possible(self):
        """Seconds until the first exhausted window resets; None if none is exhausted."""
        for idx, (max_requests, requests_made) in enumerate(
                zip(self.max_requests, self.requests_made)):
            if requests_made >= max_requests:
                return self.reset_timestamp[idx] - int(time())
import requests
import time
from .rsa_sign import RSARawSigner
# Python 2/3 differences
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class MAuth(requests.auth.AuthBase):
    """
    Custom requests authorizer for MAuth.

    Signs each outgoing request with the application's RSA private key and
    attaches the resulting `X-MWS-Authentication` and `X-MWS-Time` headers.
    """

    def __init__(self, app_uuid, private_key_data):
        """
        Create a new MAuth instance.

        :param str app_uuid: the Application UUID (or APP_UUID) for the application
        :param str private_key_data: content of the private key file
        """
        self.app_uuid = app_uuid
        self.signer = RSARawSigner(private_key_data)

    def __call__(self, r):
        """Call override, the entry point for a custom auth object.

        :param requests.models.PreparedRequest r: the Request object
        """
        r.headers.update(self.make_headers(r))
        return r

    def make_headers(self, r):
        """Build the MAuth authentication headers for a prepared request.

        :param requests.models.PreparedRequest r: the Request object
        """
        # The signature covers only the URL path (no scheme, host, port or
        # query string).
        path = urlparse(r.url).path
        to_sign, epoch_secs = self.make_signature_string(r.method, path, r.body)
        return self.make_authentication_headers(self.signer.sign(to_sign), epoch_secs)

    def make_authentication_headers(self, signed_string, seconds_since_epoch):
        """Return the two MAuth headers, of the form:

        ::

            X-MWS-Authentication: MWS {app_uuid}:{signed_string}
            X-MWS-Time: {seconds_since_epoch}

        :param str signed_string: the signed string for the header
        :param int seconds_since_epoch: the number of seconds since the Epoch
        """
        auth_value = 'MWS %s:%s' % (self.app_uuid, signed_string,)
        return {
            'X-MWS-Authentication': auth_value,
            'X-MWS-Time': str(seconds_since_epoch),
        }

    def make_signature_string(self, verb, url_path, body, seconds_since_epoch=None):
        """Make a signature string for signing a request, of the form:

        ::

            string_to_sign =
                Http_Verb + "\\n" +
                Resource URL path (no host, port or query string; first "/" is included) + "\\n" +
                message_body_string + "\\n" +
                app_uuid + "\\n" +
                Current_Seconds_Since_Epoch

        Returns the encoded string and the seconds-since-epoch used in it.
        Passing `seconds_since_epoch` explicitly is helpful for testing,
        especially against the output of the ruby mauth command-line client.

        :param str verb: the HTTP verb for the request (e.g. GET, POST)
        :param str url_path: the URL path for the request (e.g. '/studies.json')
        :param str body: the body of the request
        :param int seconds_since_epoch: override the timestamp used in the signature
        """
        if seconds_since_epoch is None:
            seconds_since_epoch = int(time.time())
        if isinstance(body, bytes):
            body = body.decode('utf-8')
        string_to_sign = u'{verb}\n{url_path}\n{body}\n{app_uid}\n{seconds_since_epoch!s}'.format(
            verb=verb,
            url_path=url_path,
            body=body or '',
            app_uid=self.app_uuid,
            seconds_since_epoch=seconds_since_epoch,
        )
        return string_to_sign, seconds_since_epoch
__author__ = 'isparks'
# This module exists to reproduce, with the rsa library, the raw signature required by MAuth
# which in OpenSSL is created with private_encrypt(hash). It provides an RSA sign class built from
# code that came from https://www.dlitz.net/software/pycrypto/api/current/ no copyright of that original
# code is claimed.
from hashlib import sha512
from rsa import common, core, transform, PrivateKey
import base64
def make_bytes(val):
    """Coerce text to UTF-8 bytes; pass every other value through unchanged.

    Works on both Python 2 and 3: looking up `unicode` raises NameError on
    Python 3, which routes execution to the str/bytes branch.

    :param val: the supplied value (string-like)
    """
    try:
        # Python 2: encode unicode text; byte strings fall through unchanged.
        if isinstance(val, unicode):
            return val.encode('utf-8')
    except NameError:
        # Python 3: bytes pass through as-is, str gets encoded.
        if isinstance(val, bytes):
            return val
        if isinstance(val, str):
            return val.encode('utf-8')
    return val
class RSARawSigner(object):
    """
    Implements the equivalent of the OpenSSL private_encrypt method using
    the `rsa` library primitives.
    """

    def __init__(self, private_key_data):
        """
        :param private_key_data: PEM-encoded PKCS#1 private key content
        """
        self.private_key_data = private_key_data
        self.pk = PrivateKey.load_pkcs1(private_key_data, 'PEM')

    def sign(self, string_to_sign):
        """Sign the data, emulating OpenSSL's private_encrypt.

        The SHA-512 hex digest of the input is PKCS#1 v1.5 padded and then
        raw RSA "encrypted" with the private exponent; the result is
        returned base64-encoded without newlines.

        :param str string_to_sign: the string to sign
        :rtype: str
        """
        digest = sha512(make_bytes(string_to_sign)).hexdigest().encode('US-ASCII')
        key_length = common.byte_size(self.pk.n)
        padded = make_bytes(self.pad_for_signing(digest, key_length))
        raw_int = transform.bytes2int(padded)
        signed_int = core.encrypt_int(raw_int, self.pk.d, self.pk.n)
        raw_signature = transform.int2bytes(signed_int, key_length)
        return base64.b64encode(raw_signature).decode('US-ASCII').replace('\n', '')

    def pad_for_signing(self, message, target_length):
        """PKCS#1 v1.5 signature padding (adapted from rsa's pkcs1.py).

        Pads the message as ``00 01 FF..FF 00 MESSAGE`` up to
        `target_length` bytes and returns the padded block.

        :param message: message to pad in readiness for signing
        :param int target_length: total length of the padded block
        :return: the padded block
        :raises OverflowError: when the message cannot fit with padding
        """
        max_msglength = target_length - 11
        msglength = len(message)
        if msglength > max_msglength:  # pragma: no cover
            raise OverflowError('%i bytes needed for message, but there is only'
                                ' space for %i' % (msglength, max_msglength))
        fill = target_length - msglength - 3
        return b'\x00\x01' + fill * b'\xff' + b'\x00' + message
import base64
import logging
import ntlm_auth.ntlm
from .core import NtlmCompatibility
logger = logging.getLogger(__name__)
class HttpNtlmContext(ntlm_auth.ntlm.NtlmContext):
    """Thin wrapper over ntlm_auth.ntlm.NtlmContext for HTTP"""

    def __init__(
        self,
        username,
        password,
        domain=None,
        workstation=None,
        cbt_data=None,
        ntlm_compatibility=NtlmCompatibility.NTLMv2_DEFAULT,
        auth_type=None,
        server_certificate_hash=None,
    ):
        r"""
        Initialises a NTLM context to use when authenticating using the NTLM
        protocol.

        Initialises the NTLM context to use when sending and receiving messages
        to and from the server. You should be using this object as it supports
        NTLMv2 authenticate and it easier to use than before. It also brings in
        the ability to use signing and sealing with session_security and
        generate a MIC structure.

        :param username: The username to authenticate with
        :param password: The password for the username
        :param domain: The domain part of the username (None if n/a)
        :param workstation: The localworkstation (None if n/a)
        :param cbt_data: A GssChannelBindingsStruct or None to bind channel
            data with the auth process
        :param ntlm_compatibility: (Default 3)
            The Lan Manager Compatibility Level to use with the auth message
            This is set by an Administrator in the registry key
            'HKLM\SYSTEM\CurrentControlSet\Control\Lsa\LmCompatibilityLevel'
            The values correspond to the following;
                0 : LM and NTLMv1
                1 : LM, NTLMv1 and NTLMv1 with Extended Session Security
                2 : NTLMv1 and NTLMv1 with Extended Session Security
                3-5 : NTLMv2 Only
            Note: Values 3 to 5 are no different from a client perspective
        :param auth_type: either 'NTLM' or 'Negotiate'
        :param server_certificate_hash: the hash of the certificate from peer
        """
        if auth_type not in ("NTLM", "Negotiate"):
            # Bug fix: the original mixed %-style ('%s') with str.format(),
            # so the offending value never appeared in the error message.
            raise ValueError(
                'Expected "NTLM" or "Negotiate" auth_type, got {}'.format(auth_type)
            )
        self._auth_type = auth_type
        self._challenge_token = None
        # Stored for callers that need the peer certificate hash; not used
        # directly by this class.
        self._server_certificate_hash = server_certificate_hash
        super(HttpNtlmContext, self).__init__(
            username,
            password,
            domain=domain,
            workstation=workstation,
            cbt_data=cbt_data,
            ntlm_compatibility=ntlm_compatibility,
        )

    @property
    def negotiate_message(self):
        """Raw NTLM NEGOTIATE (type 1) message held by the underlying context."""
        return self._negotiate_message

    @negotiate_message.setter
    def negotiate_message(self, value):
        self._negotiate_message = value

    @property
    def challenge_message(self):
        """Raw NTLM CHALLENGE (type 2) message held by the underlying context."""
        return self._challenge_message

    @challenge_message.setter
    def challenge_message(self, value):
        self._challenge_message = value

    @property
    def authenticate_message(self):
        """Raw NTLM AUTHENTICATE (type 3) message held by the underlying context."""
        return self._authenticate_message

    @authenticate_message.setter
    def authenticate_message(self, value):
        self._authenticate_message = value

    @property
    def session_security(self):
        """Session security object from the underlying NTLM context."""
        return self._session_security

    @session_security.setter
    def session_security(self, value):
        self._session_security = value

    def create_negotiate_message(self):
        """Return the base64-encoded NTLM NEGOTIATE (type 1) message."""
        msg = self.step()
        return base64.b64encode(msg)

    def parse_challenge_message(self, msg2):
        """Decode and store the server's base64 CHALLENGE (type 2) message."""
        self._challenge_token = base64.b64decode(msg2)

    def create_authenticate_message(self):
        """Return the base64-encoded NTLM AUTHENTICATE (type 3) message."""
        msg = self.step(self._challenge_token)
        return base64.b64encode(msg)

    def get_negotiate_header(self):
        """Return the `Authorization` header value for the initial handshake step."""
        negotiate_message = self.create_negotiate_message().decode("ascii")
        return u"{auth_type} {negotiate_message}".format(
            auth_type=self._auth_type, negotiate_message=negotiate_message
        )

    def set_challenge_from_header(self, raw_header_value):
        """Extract and store the NTLM challenge from an authenticate header.

        Handles comma-separated header values, with or without a
        `Proxy-Authenticate:` / `WWW-Authenticate:` prefix. Returns None
        when no matching challenge is found.
        """
        if not raw_header_value:
            return None
        match_strings = (
            "{} ".format(self._auth_type),
            "{}: {} ".format("Proxy-Authenticate", self._auth_type),
            "{}: {} ".format("WWW-Authenticate", self._auth_type),
        )
        for header_value in raw_header_value.split(","):
            header_value = header_value.strip()
            for auth_strip in match_strings:
                if header_value.startswith(auth_strip):
                    challenge = header_value.replace(auth_strip, "")
                    return self.parse_challenge_message(challenge)
        return None

    def get_authenticate_header(self):
        """Return the `Authorization` header value for the final handshake step."""
        authenticate_message = self.create_authenticate_message()
        authenticate_message = authenticate_message.decode("ascii")
        return u"{auth_type} {authenticate_message}".format(
            auth_type=self._auth_type, authenticate_message=authenticate_message
        )
import time
from datetime import datetime
import random
import urllib
from urlparse import urlparse, urlunparse, parse_qs, urlsplit, urlunsplit
from auth import Token, Consumer
from auth import to_utf8, escape
from auth import SignatureMethod_HMAC_SHA1
class CustomSignatureMethod_HMAC_SHA1(SignatureMethod_HMAC_SHA1):
    def signing_base(self, request, consumer, token):
        """
        Build the HMAC-SHA1 signing key and signature base string for
        `request`. Defined here (rather than in `auth`) to avoid circular
        imports.
        """
        base_elements = (
            escape(request.method),
            escape(OAuthHook.get_normalized_url(request.url)),
            escape(OAuthHook.get_normalized_parameters(request)),
        )
        key = '%s&' % escape(consumer.secret)
        if token is not None:
            key += escape(token.secret)
        return key, '&'.join(base_elements)
class OAuthHook(object):
    """
    Pre-request hook that signs python-requests requests following the
    OAuth 1.0 protocol (HMAC-SHA1 signature method).

    NOTE(review): this module targets Python 2 only (`basestring`,
    `iteritems`, `except TypeError, e` syntax).
    """
    # Protocol version sent as the `oauth_version` parameter.
    OAUTH_VERSION = '1.0'
    # When True, OAuth parameters travel in the `Authorization` header
    # instead of the query string / body.
    header_auth = False
    signature = CustomSignatureMethod_HMAC_SHA1()
    # Class-level fallbacks used when no consumer pair is passed to __init__.
    consumer_key = None
    consumer_secret = None
    def __init__(self, access_token=None, access_token_secret=None, consumer_key=None, consumer_secret=None, header_auth=None):
        """
        Consumer is compulsory, while the user's Token can be retrieved through the API
        """
        if access_token is not None:
            self.token = Token(access_token, access_token_secret)
        else:
            self.token = None
        # Fall back to the class-level consumer pair when none is given.
        if consumer_key is None and consumer_secret is None:
            consumer_key = self.consumer_key
            consumer_secret = self.consumer_secret
        if header_auth is not None:
            self.header_auth = header_auth
        self.consumer = Consumer(consumer_key, consumer_secret)
    @staticmethod
    def _split_url_string(query_string):
        """
        Turns a `query_string` into a Python dictionary with unquoted values
        """
        parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)
        for k, v in parameters.iteritems():
            # parse_qs yields lists; keep only the first value, unquoted.
            parameters[k] = urllib.unquote(v[0])
        return parameters
    @staticmethod
    def get_normalized_parameters(request):
        """
        Returns a string that contains the parameters that must be signed.
        This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
        """
        # See issues #10 and #12
        if ('Content-Type' not in request.headers or \
            request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) \
            and not isinstance(request.data, basestring):
            data_and_params = dict(request.data.items() + request.params.items())
            for key,value in data_and_params.items():
                request.data_and_params[to_utf8(key)] = to_utf8(value)
        # The signature itself must never be part of the signed base string.
        if request.data_and_params.has_key('oauth_signature'):
            del request.data_and_params['oauth_signature']
        items = []
        for key, value in request.data_and_params.iteritems():
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, basestring):
                items.append((key, value))
            else:
                try:
                    value = list(value)
                except TypeError, e:
                    assert 'is not iterable' in str(e)
                    items.append((key, value))
                else:
                    items.extend((key, item) for item in value)
        # Include any query string parameters included in the url
        query_string = urlparse(request.url)[4]
        items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()])
        items.sort()
        # Percent-encode per RFC 3986: spaces as %20, tildes left literal.
        return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~')
    @staticmethod
    def get_normalized_url(url):
        """
        Returns a normalized url, without params
        """
        scheme, netloc, path, params, query, fragment = urlparse(url)
        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        if scheme not in ('http', 'https'):
            raise ValueError("Unsupported URL %s (%s)." % (url, scheme))
        # Normalized URL excludes params, query, and fragment.
        return urlunparse((scheme, netloc, path, None, None, None))
    @staticmethod
    def to_url(request):
        """Serialize as a URL for a GET request."""
        scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))
        query = parse_qs(query)
        for key, value in request.data_and_params.iteritems():
            query.setdefault(key, []).append(value)
        # doseq=True: expand each sequence element into its own parameter.
        query = urllib.urlencode(query, True)
        return urlunsplit((scheme, netloc, path, query, fragment))
    @staticmethod
    def to_postdata(request):
        """Serialize as post data for a POST request. This serializes data and params"""
        # tell urlencode to convert each sequence element to a separate parameter
        return urllib.urlencode(request.data_and_params, True).replace('+', '%20')
    @staticmethod
    def authorization_header(oauth_params):
        """Return Authorization header"""
        authorization_headers = 'OAuth realm="",'
        authorization_headers += ','.join(['{0}="{1}"'.format(k, urllib.quote(str(v)))
            for k, v in oauth_params.items()])
        return authorization_headers
    def __call__(self, request):
        """
        Pre-request hook that signs a Python-requests Request for OAuth authentication
        """
        # These checks are necessary because of type inconsistency in the requests library
        # See request Github issue #230 https://github.com/kennethreitz/requests/pull/230
        if not request.params:
            request.params = {}
        if not request.data:
            request.data = {}
        if isinstance(request.params, list):
            request.params = dict(request.params)
        if isinstance(request.data, list):
            request.data = dict(request.data)
        # Dictionary to OAuth1 signing params
        request.oauth_params = {}
        # Adding OAuth params
        request.oauth_params['oauth_consumer_key'] = self.consumer.key
        request.oauth_params['oauth_timestamp'] = str(int(time.time()))
        request.oauth_params['oauth_nonce'] = str(random.randint(0, 100000000))
        request.oauth_params['oauth_version'] = self.OAUTH_VERSION
        if self.token:
            request.oauth_params['oauth_token'] = self.token.key
        if 'oauth_verifier' in request.data:
            request.oauth_params['oauth_verifier'] = request.data.pop('oauth_verifier')
        request.oauth_params['oauth_signature_method'] = self.signature.name
        # oauth_callback is an special parameter, we remove it out of the body
        # If it needs to go in the body, it will be overwritten later, otherwise not
        if 'oauth_callback' in request.data:
            request.oauth_params['oauth_callback'] = request.data.pop('oauth_callback')
        if 'oauth_callback' in request.params:
            request.oauth_params['oauth_callback'] = request.params.pop('oauth_callback')
        request.data_and_params = request.oauth_params.copy()
        request.oauth_params['oauth_signature'] = self.signature.sign(request, self.consumer, self.token)
        request.data_and_params['oauth_signature'] = request.oauth_params['oauth_signature']
        if self.header_auth:
            request.headers['Authorization'] = self.authorization_header(request.oauth_params)
        elif request.method in ("GET", "DELETE"):
            request.url = self.to_url(request)
        elif ('Content-Type' not in request.headers or \
            request.headers['Content-Type'] != 'application/x-www-form-urlencoded') \
            and not isinstance(request.data, basestring):
            # You can pass a string as data. See issues #10 and #12
            request.url = self.to_url(request)
            request.data = {}
        else:
            request.data = request.data_and_params
        return request
from furl import Path, furl # type: ignore[import]
def well_known_uri(origin: str, name: str, at_root: bool = True) -> str:
    """Build the URI of a well-known document relative to an origin url.

    See [RFC8615](https://datatracker.ietf.org/doc/html/rfc8615) and
    [OIDC Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).

    Args:
        origin: origin to use to build the well-known uri.
        name: document name to use to build the well-known uri.
        at_root: if `True`, assume the well-known document is at root level (as defined in
            [RFC8615](https://datatracker.ietf.org/doc/html/rfc8615)).
            If `False`, assume the well-known location is per-directory, as defined in
            [OpenID Connect Discovery 1.0](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).

    Returns:
        the well-known uri, relative to origin, where the well-known document named `name` should be found.
    """
    location = furl(origin)
    if at_root:
        # RFC8615 style: /.well-known/<origin path>/<name>
        location.path = Path(".well-known") / location.path / name
    else:
        # OIDC Discovery style: <origin path>/.well-known/<name>
        location.path.add(Path(".well-known") / name)
    return str(location)
def oidc_discovery_document_url(issuer: str) -> str:
    """Construct the OIDC discovery document url for a given `issuer`.

    Given an `issuer` identifier, return the standardised URL where the OIDC
    discovery document can be retrieved.

    The returned URL is built as specified in
    [OpenID Connect Discovery 1.0](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata),
    i.e. with the well-known path segment inserted per-directory rather than at root level.

    Args:
        issuer: an OIDC Authentication Server `issuer`

    Returns:
        the standardised discovery document URL. Note that no attempt to fetch this document is made.
    """
    return well_known_uri(issuer, "openid-configuration", at_root=False)
def oauth2_discovery_document_url(issuer: str) -> str:
    """Construct the standardised OAuth 2.0 discovery document url for a given `issuer`.

    Based on an `issuer` identifier, returns the standardised URL where the OAuth 2.0
    server metadata can be retrieved.

    The returned URL is built as specified in
    [RFC8414](https://datatracker.ietf.org/doc/html/rfc8414), i.e. with the
    well-known path segment inserted at root level.

    Args:
        issuer: an OAuth 2.0 Authentication Server `issuer`

    Returns:
        the standardised discovery document URL. Note that no attempt to fetch this document is made.
    """
    return well_known_uri(issuer, "oauth-authorization-server", at_root=True)
from __future__ import annotations
import re
import secrets
from datetime import datetime
from typing import Any, Callable, Iterable, Mapping
from binapy import BinaPy
from furl import furl # type: ignore[import]
from jwskate import JweCompact, Jwk, Jwt, SignedJwt
from typing_extensions import Literal
from .exceptions import (
AuthorizationResponseError,
ConsentRequired,
InteractionRequired,
LoginRequired,
MismatchingIssuer,
MismatchingState,
MissingAuthCode,
MissingIssuer,
SessionSelectionRequired,
)
from .utils import accepts_expires_in
class PkceUtils:
    """Helper methods for PKCE (Proof Key for Code Exchange).

    See [RFC7636](https://tools.ietf.org/html/rfc7636).
    """

    # matches valid code verifiers: 43 to 128 characters from the unreserved set
    code_verifier_re = re.compile(r"^[a-zA-Z0-9_\-~.]{43,128}$")

    @classmethod
    def generate_code_verifier(cls) -> str:
        """Generate a valid, random `code_verifier`.

        Returns:
            a code_verifier ready to use for PKCE
        """
        # 96 random bytes base64url-encode to a 128-character verifier
        return secrets.token_urlsafe(96)

    @classmethod
    def derive_challenge(cls, verifier: str | bytes, method: str = "S256") -> str:
        """Derive the `code_challenge` from a given `code_verifier`.

        Args:
            verifier: a code verifier
            method: the method to use for deriving the challenge. Accepts 'S256' or 'plain'.

        Returns:
            a code_challenge derived from the given verifier

        Raises:
            ValueError: if the verifier is invalid, or the method is unsupported.
        """
        if isinstance(verifier, bytes):
            verifier = verifier.decode()
        if cls.code_verifier_re.match(verifier) is None:
            raise ValueError(
                f"Invalid code verifier, does not match {cls.code_verifier_re}",
                verifier,
            )
        if method == "plain":
            # 'plain' uses the verifier itself as challenge
            return verifier
        if method == "S256":
            # SHA256 hash of the verifier, base64url-encoded
            return BinaPy(verifier).to("sha256").to("b64u").ascii()
        raise ValueError("Unsupported code_challenge_method", method)

    @classmethod
    def generate_code_verifier_and_challenge(cls, method: str = "S256") -> tuple[str, str]:
        """Generate a valid `code_verifier` and derive its `code_challenge`.

        Args:
            method: the method to use for deriving the challenge. Accepts 'S256' or 'plain'.

        Returns:
            a (code_verifier, code_challenge) tuple.
        """
        verifier = cls.generate_code_verifier()
        return verifier, cls.derive_challenge(verifier, method)

    @classmethod
    def validate_code_verifier(
        cls, verifier: str, challenge: str, method: str = "S256"
    ) -> bool:
        """Validate a `code_verifier` against a `code_challenge`.

        Args:
            verifier: the `code_verifier`, exactly as submitted by the client on token request.
            challenge: the `code_challenge`, exactly as submitted by the client on authorization request.
            method: the method to use for deriving the challenge. Accepts 'S256' or 'plain'.

        Returns:
            `True` if verifier is valid, or `False` otherwise
        """
        # reject malformed verifiers without attempting derivation
        if cls.code_verifier_re.match(verifier) is None:
            return False
        return cls.derive_challenge(verifier, method) == challenge
class AuthorizationResponse:
    """Represent a successful Authorization Response.

    An Authorization Response is the redirection initiated by the AS to the client's
    redirection endpoint (redirect_uri), after an Authorization Request.
    Instances are typically obtained by calling `AuthorizationRequest.validate_callback()`
    once the call to the client Redirection Endpoint is made.

    All parameters returned by the AS (most notably the `code`, and optional parameters
    such as `state`) are exposed as attributes, together with the `redirect_uri` and
    `code_verifier` that were used for the matching Authorization Request.

    Args:
        code: the authorization code returned by the AS
        redirect_uri: the redirect_uri that was passed as parameter in the AuthorizationRequest
        code_verifier: the code_verifier matching the code_challenge that was passed as
            parameter in the AuthorizationRequest
        state: the state returned by the AS
        nonce: the nonce that was passed as parameter in the AuthorizationRequest
        acr_values: the acr_values that were passed as parameter in the AuthorizationRequest
        max_age: the max_age that was passed as parameter in the AuthorizationRequest
        **kwargs: other parameters as returned by the AS
    """

    def __init__(
        self,
        code: str,
        redirect_uri: str | None = None,
        code_verifier: str | None = None,
        state: str | None = None,
        nonce: str | None = None,
        acr_values: Iterable[str] | None = None,
        max_age: int | None = None,
        **kwargs: str,
    ):
        self.code = code
        self.redirect_uri = redirect_uri
        self.code_verifier = code_verifier
        self.state = state
        self.nonce = nonce
        # normalize acr_values to a list (or None when absent)
        self.acr_values = None if acr_values is None else list(acr_values)
        self.max_age = max_age
        # any extra parameter returned by the AS is kept and exposed via __getattr__
        self.others = kwargs

    def __getattr__(self, item: str) -> str | None:
        """Make additional AS-returned parameters available as attributes.

        Args:
            item: the attribute name

        Returns:
            the attribute value, or None if it isn't part of the returned attributes
        """
        return self.others.get(item)
class AuthorizationRequest:
    """Represents an Authorization Request.

    This class makes it easy to generate valid Authorization Request URI (possibly including a
    state, nonce, PKCE, and custom args), to store all parameters, and to validate an
    Authorization Response.

    All parameters passed at init time will be included in the request query parameters as-is,
    excepted for a few parameters which have a special behaviour:

    * `state`: if True (default), a random state parameter will be generated for you. You may
      pass your own state as `str`, or set it to `None` so that the state parameter will not be
      included in the request. You may access that state in the `state` attribute from this
      request.
    * `nonce`: if True (default) and scope includes 'openid', a random nonce will be generated
      and included in the request. You may access that nonce in the `nonce` attribute from this
      request.
    * `code_verifier`: if `None`, and `code_challenge_method` is `'S256'` or `'plain'`, a valid
      `code_challenge` and `code_verifier` for PKCE will be automatically generated, and the
      `code_challenge` will be included in the request. You may pass your own `code_verifier`
      as a `str` parameter, in which case the appropriate `code_challenge` will be included in
      the request, according to the `code_challenge_method`.

    Args:
        authorization_endpoint: the uri for the authorization endpoint.
        client_id: the client_id to include in the request.
        redirect_uri: the redirect_uri to include in the request. This is required in OAuth 2.0
            and optional in OAuth 2.1. Pass `None` if you don't need any redirect_uri in the
            Authorization Request.
        scope: the scope to include in the request, as an iterable of `str`, or a single
            space-separated `str`.
        response_type: the response type to include in the request.
        state: the state to include in the request, or `True` to autogenerate one (default).
        nonce: the nonce to include in the request, or `True` to autogenerate one (default).
        code_verifier: the code verifier to include in the request. If left as `None` and
            `code_challenge_method` is set, a valid code_verifier will be generated.
        code_challenge_method: the method to use to derive the `code_challenge` from the
            `code_verifier`.
        acr_values: requested Authentication Context Class Reference values.
        max_age: the `max_age` parameter to include in the request, in seconds. Must not be
            negative.
        issuer: Issuer Identifier value from the OAuth/OIDC Server, if using Server Issuer
            Identification.
        authorization_response_iss_parameter_supported: if `True`, the Authorization Response
            is expected to carry an `iss` parameter (RFC9207), and `issuer` must be provided.
        **kwargs: extra parameters to include in the request, as-is.
    """

    # maps the standardized `error` codes an AS may return in the Authorization Response
    # to the specific exception class raised by `on_response_error()`
    exception_classes: dict[str, type[Exception]] = {
        "interaction_required": InteractionRequired,
        "login_required": LoginRequired,
        "session_selection_required": SessionSelectionRequired,
        "consent_required": ConsentRequired,
    }

    @classmethod
    def generate_state(cls) -> str:
        """Generate a random `state` parameter."""
        return secrets.token_urlsafe(32)

    @classmethod
    def generate_nonce(cls) -> str:
        """Generate a random `nonce`."""
        return secrets.token_urlsafe(32)

    def __init__(
        self,
        authorization_endpoint: str,
        client_id: str,
        redirect_uri: str | None = None,
        scope: None | str | Iterable[str] = "openid",
        response_type: str = "code",
        state: str | Literal[True] | None = True,
        nonce: str | Literal[True] | None = True,
        code_verifier: str | None = None,
        code_challenge_method: str | None = "S256",
        acr_values: str | Iterable[str] | None = None,
        max_age: int | None = None,
        issuer: str | None = None,
        authorization_response_iss_parameter_supported: bool = False,
        **kwargs: Any,
    ) -> None:
        # RFC9207 issuer identification requires knowing the expected issuer up-front
        if authorization_response_iss_parameter_supported and not issuer:
            raise ValueError(
                "When 'authorization_response_iss_parameter_supported' is True, you must provide the expected 'issuer' as parameter."
            )
        # `state is True` means: autogenerate a random state
        if state is True:
            state = self.generate_state()
        # normalize `scope` to a sequence of individual scope tokens
        if scope is not None:
            if isinstance(scope, str):
                scope = scope.split(" ")
            else:
                scope = tuple(scope)
        # `nonce is True` means: autogenerate one, but only for OpenID Connect requests
        # (i.e. when 'openid' is part of the requested scope)
        if nonce is True:
            if scope is not None and "openid" in scope:
                nonce = self.generate_nonce()
            else:
                nonce = None
        # normalize `acr_values` to a list of individual values
        if acr_values is not None:
            if isinstance(acr_values, str):
                acr_values = acr_values.split()
            elif not isinstance(acr_values, list):
                acr_values = list(acr_values)
        # the code_challenge is always derived from the code_verifier, never accepted directly
        if "code_challenge" in kwargs:
            raise ValueError(
                "A code_challenge must not be passed as parameter. "
                "Pass the code_verifier instead, and the appropriate code_challenge "
                "will automatically be derived from it and included in the request, "
                "based on code_challenge_method."
            )
        if not code_challenge_method:
            # PKCE disabled: make sure no verifier/challenge/method leaks into the request
            code_verifier = code_challenge = code_challenge_method = None
        else:
            if not code_verifier:
                code_verifier = PkceUtils.generate_code_verifier()
            code_challenge = PkceUtils.derive_challenge(code_verifier, code_challenge_method)
        if max_age is not None:
            if max_age < 0:
                raise ValueError(
                    "The `max_age` parameter is a number of seconds and cannot be negative."
                )
        self.authorization_endpoint = authorization_endpoint
        self.client_id = client_id
        self.redirect_uri = redirect_uri
        self.issuer = issuer
        self.response_type = response_type
        self.scope = scope
        self.state = state
        self.nonce = nonce
        self.code_verifier = code_verifier
        self.code_challenge = code_challenge
        self.code_challenge_method = code_challenge_method
        self.acr_values = acr_values
        self.max_age = max_age
        self.authorization_response_iss_parameter_supported = (
            authorization_response_iss_parameter_supported
        )
        self.kwargs = kwargs
        # `args` holds the actual query parameters for the Authorization Request;
        # multi-valued parameters (scope, acr_values) are serialized space-separated
        self.args = dict(
            client_id=client_id,
            redirect_uri=redirect_uri,
            response_type=response_type,
            scope=" ".join(scope) if scope is not None else None,
            state=state,
            nonce=nonce,
            code_challenge=code_challenge,
            code_challenge_method=code_challenge_method,
            acr_values=" ".join(acr_values) if acr_values is not None else None,
            max_age=max_age,
            **kwargs,
        )

    def as_dict(self) -> Mapping[str, Any]:
        """Return a dict with all the parameters used to init this Authorization Request.

        Used for serialization of this request. A new AuthorizationRequest initialized with
        the same parameters will be equal to this one.

        Returns:
            a dict of parameters
        """
        return {
            "authorization_endpoint": self.authorization_endpoint,
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "scope": self.scope,
            "response_type": self.response_type,
            "state": self.state,
            "nonce": self.nonce,
            "code_verifier": self.code_verifier,
            "code_challenge_method": self.code_challenge_method,
            "issuer": self.issuer,
            "authorization_response_iss_parameter_supported": self.authorization_response_iss_parameter_supported,
            "acr_values": self.acr_values,
            "max_age": self.max_age,
            **self.kwargs,
        }

    def sign_request_jwt(
        self,
        jwk: Jwk | dict[str, Any],
        alg: str | None = None,
        lifetime: int | None = None,
    ) -> SignedJwt:
        """Sign the `request` object that matches this Authorization Request parameters.

        Args:
            jwk: the JWK to use to sign the request
            alg: the alg to use to sign the request, if the passed `jwk` has no `alg` parameter.
            lifetime: an optional number of seconds of validity for the signed request.
                If present, `iat` and `exp` claims will be included in the signed JWT.

        Returns:
            a `Jwt` that contains the signed request object.
        """
        # `None` values are omitted from the request object claims
        claims = {key: val for key, val in self.args.items() if val is not None}
        if lifetime:
            claims["iat"] = Jwt.timestamp()
            claims["exp"] = Jwt.timestamp(lifetime)
        return Jwt.sign(
            claims,
            key=jwk,
            alg=alg,
        )

    def sign(
        self,
        jwk: Jwk | dict[str, Any],
        alg: str | None = None,
        lifetime: int | None = None,
    ) -> RequestParameterAuthorizationRequest:
        """Sign this Authorization Request and return a new one.

        This replaces all parameters with a signed `request` JWT.

        Args:
            jwk: the JWK to use to sign the request
            alg: the alg to use to sign the request, if the passed `jwk` has no `alg` parameter.
            lifetime: lifetime of the resulting Jwt (used to calculate the 'exp' claim).
                By default, don't use an 'exp' claim.

        Returns:
            the signed Authorization Request
        """
        request_jwt = self.sign_request_jwt(jwk, alg, lifetime)
        return RequestParameterAuthorizationRequest(
            authorization_endpoint=self.authorization_endpoint,
            client_id=self.client_id,
            request=str(request_jwt),
            expires_at=request_jwt.expires_at,
        )

    def sign_and_encrypt_request_jwt(
        self,
        sign_jwk: Jwk | dict[str, Any],
        enc_jwk: Jwk | dict[str, Any],
        sign_alg: str | None = None,
        enc_alg: str | None = None,
        enc: str = "A128CBC-HS256",
        lifetime: int | None = None,
    ) -> JweCompact:
        """Sign and encrypt a `request` object for this Authorization Request.

        The signed `request` will contain the same parameters as this AuthorizationRequest.

        Args:
            sign_jwk: the JWK to use to sign the request
            enc_jwk: the JWK to use to encrypt the request
            sign_alg: the alg to use to sign the request, if the passed `jwk` has no `alg` parameter.
            enc_alg: the alg to use to encrypt the request, if the passed `jwk` has no `alg` parameter.
            enc: the content encryption algorithm ('enc') to use to encrypt the request,
                if the passed `jwk` has no `enc` parameter.
            lifetime: lifetime of the resulting Jwt (used to calculate the 'exp' claim).
                By default, do not include an 'exp' claim.

        Returns:
            the signed and encrypted request object, as a `jwskate.Jwt`
        """
        # `None` values are omitted from the request object claims
        claims = {key: val for key, val in self.args.items() if val is not None}
        if lifetime:
            claims["iat"] = Jwt.timestamp()
            claims["exp"] = Jwt.timestamp(lifetime)
        return Jwt.sign_and_encrypt(
            claims=claims,
            sign_key=sign_jwk,
            sign_alg=sign_alg,
            enc_key=enc_jwk,
            enc_alg=enc_alg,
            enc=enc,
        )

    def sign_and_encrypt(
        self,
        sign_jwk: Jwk | dict[str, Any],
        enc_jwk: Jwk | dict[str, Any],
        sign_alg: str | None = None,
        enc_alg: str | None = None,
        enc: str = "A128CBC-HS256",
        lifetime: int | None = None,
    ) -> RequestParameterAuthorizationRequest:
        """Sign and encrypt the current Authorization Request.

        This replaces all parameters with a matching `request` object.

        Args:
            sign_jwk: the JWK to use to sign the request
            enc_jwk: the JWK to use to encrypt the request
            sign_alg: the alg to use to sign the request, if the passed `jwk` has no `alg` parameter.
            enc_alg: the alg to use to encrypt the request, if the passed `jwk` has no `alg` parameter.
            enc: the content encryption algorithm ('enc') to use to encrypt the request,
                if the passed `jwk` has no `enc` parameter.
            lifetime: lifetime of the resulting Jwt (used to calculate the 'exp' claim).
                By default, do not include an 'exp' claim.

        Returns:
            the same AuthorizationRequest, with a request object as parameter
        """
        request_jwt = self.sign_and_encrypt_request_jwt(
            sign_jwk=sign_jwk,
            enc_jwk=enc_jwk,
            sign_alg=sign_alg,
            enc_alg=enc_alg,
            enc=enc,
            lifetime=lifetime,
        )
        return RequestParameterAuthorizationRequest(
            authorization_endpoint=self.authorization_endpoint,
            client_id=self.client_id,
            request=str(request_jwt),
        )

    def validate_callback(self, response: str) -> AuthorizationResponse:
        """Validate an Authorization Response against this Request.

        Validate a given Authorization Response URI against this Authorization Request, and
        return an [AuthorizationResponse][requests_oauth2client.authorization_request.AuthorizationResponse].

        This includes matching the `state` parameter, checking for returned errors, and
        extracting the returned `code` and other parameters.

        Args:
            response: the Authorization Response URI. This can be the full URL, or just the query parameters.

        Returns:
            the extracted code, if all checks are successful

        Raises:
            MismatchingIssuer: if the 'iss' received in the response doesn't match the expected value.
            MismatchingState: if the response `state` does not match the expected value.
            OAuth2Error: if the response includes an error.
            MissingAuthCode: if the response does not contain a `code`.
            NotImplementedError: if response_type anything else than 'code'
        """
        try:
            response_url = furl(response)
        except ValueError:
            # an unparseable response uri is treated as an error response
            return self.on_response_error(response)
        # validate 'iss' according to RFC9207
        received_issuer = response_url.args.get("iss")
        if self.authorization_response_iss_parameter_supported or received_issuer:
            if received_issuer is None:
                raise MissingIssuer()
            if self.issuer and received_issuer != self.issuer:
                raise MismatchingIssuer(self.issuer, received_issuer)
        # validate state
        requested_state = self.state
        if requested_state:
            received_state = response_url.args.get("state")
            if requested_state != received_state:
                raise MismatchingState(requested_state, received_state)
        # an `error` parameter means the AS rejected the request
        error = response_url.args.get("error")
        if error:
            return self.on_response_error(response)
        if "code" in self.response_type:
            code: str = response_url.args.get("code")
            if code is None:
                raise MissingAuthCode()
        else:
            raise NotImplementedError()
        return AuthorizationResponse(
            code_verifier=self.code_verifier,
            redirect_uri=self.redirect_uri,
            nonce=self.nonce,
            acr_values=self.acr_values,
            max_age=self.max_age,
            **response_url.args,
        )

    def on_response_error(self, response: str) -> AuthorizationResponse:
        """Error handler for Authorization Response errors.

        Triggered by [validate_callback()][requests_oauth2client.authorization_request.AuthorizationRequest.validate_callback]
        if the response uri contains an error.

        Args:
            response: the Authorization Response URI. This can be the full URL, or just the query parameters.

        Returns:
            may return a default code that will be returned by `validate_callback`.
            But this method will most likely raise exceptions instead.
        """
        response_url = furl(response)
        error = response_url.args.get("error")
        error_description = response_url.args.get("error_description")
        error_uri = response_url.args.get("error_uri")
        # unknown error codes fall back to the generic AuthorizationResponseError
        exception_class = self.exception_classes.get(error, AuthorizationResponseError)
        raise exception_class(error, error_description, error_uri)

    @property
    def furl(self) -> furl:
        """Return the Authorization Request URI, as a `furl`."""
        return furl(
            self.authorization_endpoint,
            args={key: value for key, value in self.args.items() if value is not None},
        )

    @property
    def uri(self) -> str:
        """Return the Authorization Request URI, as a `str`."""
        return str(self.furl.url)

    def __repr__(self) -> str:
        """Return the Authorization Request URI, as a `str`."""
        return self.uri

    def __eq__(self, other: Any) -> bool:
        """Check if this Authorization Request is the same as another one.

        Args:
            other: another AuthorizationRequest, or a url as string

        Returns:
            `True` if the other AuthorizationRequest is the same as this one, `False` otherwise
        """
        if isinstance(other, AuthorizationRequest):
            return (
                self.authorization_endpoint == other.authorization_endpoint
                and self.args == other.args
            )
        elif isinstance(other, str):
            return self.uri == other
        return super().__eq__(other)
class RequestParameterAuthorizationRequest:
    """An Authorization Request whose parameters are conveyed in a `request` JWT.

    Args:
        authorization_endpoint: the Authorization Endpoint uri
        client_id: the client_id
        request: the request JWT
        expires_at: the expiration date for this request
    """

    @accepts_expires_in
    def __init__(
        self,
        authorization_endpoint: str,
        client_id: str,
        request: str,
        expires_at: datetime | None = None,
    ):
        self.authorization_endpoint = authorization_endpoint
        self.client_id = client_id
        self.request = request
        self.expires_at = expires_at

    @property
    def furl(self) -> furl:
        """Return the Authorization Request URI, as a `furl` instance."""
        params = {"client_id": self.client_id, "request": self.request}
        return furl(self.authorization_endpoint, args=params)

    @property
    def uri(self) -> str:
        """Return the Authorization Request URI, as a `str`."""
        return str(self.furl.url)

    def __repr__(self) -> str:
        """Return the Authorization Request URI, as a `str`.

        Returns:
            the Authorization Request URI
        """
        return self.uri
class RequestUriParameterAuthorizationRequest:
    """An Authorization Request whose parameters are passed by reference via a `request_uri`.

    Args:
        authorization_endpoint: the Authorization Endpoint uri
        client_id: the client_id
        request_uri: the `request_uri` to include in the request
        expires_at: the expiration date for this request
    """

    @accepts_expires_in
    def __init__(
        self,
        authorization_endpoint: str,
        client_id: str,
        request_uri: str,
        expires_at: datetime | None = None,
    ):
        self.authorization_endpoint = authorization_endpoint
        self.client_id = client_id
        self.request_uri = request_uri
        self.expires_at = expires_at

    @property
    def furl(self) -> furl:
        """Return the Authorization Request URI, as a `furl` instance."""
        params = {"client_id": self.client_id, "request_uri": self.request_uri}
        return furl(self.authorization_endpoint, args=params)

    @property
    def uri(self) -> str:
        """Return the Authorization Request URI, as a `str`."""
        return str(self.furl.url)

    def __repr__(self) -> str:
        """Return the Authorization Request URI, as a `str`."""
        return self.uri
class AuthorizationRequestSerializer:
    """(De)Serializer for `AuthorizationRequest` instances.

    You might need to store pending authorization requests in session, either server-side
    or client-side. This class is here to help you do that: it turns requests into compact
    strings and back, using either the provided callables or sensible defaults.
    """

    def __init__(
        self,
        dumper: Callable[[AuthorizationRequest], str] | None = None,
        loader: Callable[[str], AuthorizationRequest] | None = None,
    ):
        self.dumper = dumper if dumper is not None else self.default_dumper
        self.loader = loader if loader is not None else self.default_loader

    @staticmethod
    def default_dumper(azr: AuthorizationRequest) -> str:
        """Provide a default dumper implementation.

        Serialize an AuthorizationRequest as JSON, then compress with deflate,
        then encodes as base64url.

        Args:
            azr: the `AuthorizationRequest` to serialize

        Returns:
            the serialized value
        """
        return BinaPy.serialize_to("json", azr.as_dict()).to("deflate").to("b64u").ascii()

    def default_loader(
        self, serialized: str, azr_class: type[AuthorizationRequest] = AuthorizationRequest
    ) -> AuthorizationRequest:
        """Provide a default deserializer implementation.

        This does the opposite operations than `default_dumper`.

        Args:
            serialized: the serialized AuthorizationRequest
            azr_class: the class to instantiate with the deserialized parameters

        Returns:
            an AuthorizationRequest
        """
        params = BinaPy(serialized).decode_from("b64u").decode_from("deflate").parse_from("json")
        return azr_class(**params)

    def dumps(self, azr: AuthorizationRequest) -> str:
        """Serialize and compress a given AuthorizationRequest for easier storage.

        Args:
            azr: an AuthorizationRequest to serialize

        Returns:
            the serialized AuthorizationRequest, as a str
        """
        return self.dumper(azr)

    def loads(self, serialized: str) -> AuthorizationRequest:
        """Deserialize a serialized AuthorizationRequest.

        Args:
            serialized: the serialized AuthorizationRequest

        Returns:
            the deserialized AuthorizationRequest
        """
        return self.loader(serialized)
from __future__ import annotations
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any
from .pooling import TokenEndpointPoolingJob
from .tokens import BearerToken
from .utils import accepts_expires_in
if TYPE_CHECKING: # pragma: no cover
from .client import OAuth2Client
class BackChannelAuthenticationResponse:
    """Represent a BackChannel Authentication Response.

    This contains all the parameters that are returned by the AS as a result of a
    BackChannel Authentication Request, such as `auth_req_id` (required), and the optional
    `expires_at`, `interval`, and/or any custom parameters.

    Args:
        auth_req_id: the `auth_req_id` as returned by the AS.
        expires_at: the date when the `auth_req_id` expires.
            Note that this request also accepts an `expires_in` parameter, in seconds.
        interval: the Token Endpoint pooling interval, in seconds, as returned by the AS.
        **kwargs: any additional custom parameters as returned by the AS.
    """

    @accepts_expires_in
    def __init__(
        self,
        auth_req_id: str,
        expires_at: datetime | None = None,
        interval: int | None = 20,
        **kwargs: Any,
    ):
        self.auth_req_id = auth_req_id
        self.expires_at = expires_at
        self.interval = interval
        # extra AS-returned parameters, exposed as attributes via __getattr__
        self.other = kwargs

    def is_expired(self, leeway: int = 0) -> bool | None:
        """Return `True` if the `auth_req_id` within this response is expired.

        Expiration is evaluated at the time of the call.
        If there is no "expires_at" hint (which is derived from the `expires_in` hint
        returned by the AS BackChannel Authentication endpoint), this will return `None`.

        Args:
            leeway: a number of seconds of tolerance to subtract from the current time.

        Returns:
            `True` if the auth_req_id is expired, `False` if it is still valid,
            `None` if there is no `expires_in` hint.
        """
        if self.expires_at:
            return datetime.now() - timedelta(seconds=leeway) > self.expires_at
        return None

    def __getattr__(self, key: str) -> Any:
        """Return attributes from this `BackChannelAuthenticationResponse`.

        Allows accessing response parameters with `token_response.expires_in` or
        `token_response.any_custom_attribute`.

        Args:
            key: a key

        Returns:
            the associated value in this token response

        Raises:
            AttributeError: if the attribute is not present in the response
        """
        if key == "expires_in":
            # expires_in is recomputed from expires_at at each access
            if self.expires_at is None:
                return None
            return int(self.expires_at.timestamp() - datetime.now().timestamp())
        # NOTE: a membership test is used instead of the previous
        # `self.other.get(key) or super().__getattribute__(key)`, which wrongly
        # raised AttributeError for parameters stored with falsy values (0, "", False).
        if key in self.other:
            return self.other[key]
        return super().__getattribute__(key)
class BackChannelAuthenticationPoolingJob(TokenEndpointPoolingJob):
    """A pooling job for the BackChannel Authentication flow.

    This will poll the Token Endpoint until the user finishes with its authentication.

    Args:
        client: an OAuth2Client that will be used to pool the token endpoint.
        auth_req_id: an `auth_req_id` as `str` or a `BackChannelAuthenticationResponse`.
        interval: The pooling interval to use. This overrides the one in `auth_req_id`
            if it is a `BackChannelAuthenticationResponse`.
        slow_down_interval: Number of seconds to add to the pooling interval when the AS
            returns a slow down request.
        requests_kwargs: Additional parameters for the underlying calls to [requests.request][].
        **token_kwargs: Additional parameters for the token request.

    Usage:
        ```python
        client = OAuth2Client(
            token_endpoint="https://my.as.local/token", auth=("client_id", "client_secret")
        )
        pool_job = BackChannelAuthenticationPoolingJob(
            client=client, auth_req_id="my_auth_req_id"
        )
        token = None
        while token is None:
            token = pool_job()
        ```
    """

    def __init__(
        self,
        client: OAuth2Client,
        auth_req_id: str | BackChannelAuthenticationResponse,
        *,
        interval: int | None = None,
        slow_down_interval: int = 5,
        requests_kwargs: dict[str, Any] | None = None,
        **token_kwargs: Any,
    ):
        # when no explicit interval is given, inherit the one suggested by the AS
        if interval is None and isinstance(auth_req_id, BackChannelAuthenticationResponse):
            interval = auth_req_id.interval
        super().__init__(
            client=client,
            interval=interval,
            slow_down_interval=slow_down_interval,
            requests_kwargs=requests_kwargs,
            **token_kwargs,
        )
        self.auth_req_id = auth_req_id

    def token_request(self) -> BearerToken:
        """Implement the CIBA token request.

        This actually calls [OAuth2Client.ciba(auth_req_id)] on `client`.

        Returns:
            a [BearerToken][requests_oauth2client.tokens.BearerToken]
        """
        return self.client.ciba(
            self.auth_req_id, requests_kwargs=self.requests_kwargs, **self.token_kwargs
        )
from __future__ import annotations
from datetime import datetime
from typing import Any, Callable
from uuid import uuid4
import furl # type: ignore[import]
import requests
from binapy import BinaPy
from jwskate import Jwk, Jwt, SymmetricJwk
class BaseClientAuthenticationMethod(requests.auth.AuthBase):
"""Base class for all Client Authentication methods. This extends [requests.auth.AuthBase].
This base class only checks that requests are suitable to add Client Authentication parameters
to, and doesn't modify the request.
"""
def __init__(self, client_id: str):
self.client_id = str(client_id)
def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
"""Check that the request is suitable for Client Authentication.
It checks:
* that the method is `POST`
* that the Content-Type is "application/x-www-form-urlencoded" or None
Args:
request: a [requests.PreparedRequest][]
Returns:
a [requests.PreparedRequest][], unmodified
Raises:
RuntimeError: if the request is not suitable for OAuth 2.0 Client Authentication
"""
if request.method != "POST" or request.headers.get("Content-Type") not in (
"application/x-www-form-urlencoded",
None,
):
raise RuntimeError(
"This request is not suitable for OAuth 2.0 Client Authentication"
)
return request
class ClientSecretBasic(BaseClientAuthenticationMethod):
    """Implement `client_secret_basic` authentication.

    The Client ID and Secret are sent in the `Authorization` header, using the
    "Basic" scheme, on each authenticated request to the AS.

    Args:
        client_id: `client_id` to use.
        client_secret: `client_secret` to use.
    """

    def __init__(self, client_id: str, client_secret: str):
        super().__init__(client_id)
        self.client_secret = str(client_secret)

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Add the appropriate `Authorization` header in each request.

        The header is formatted as `Authorization: Basic BASE64('<client_id>:<client_secret>')`.

        Args:
            request: a [requests.PreparedRequest][].

        Returns:
            a [requests.PreparedRequest][] with the added Authorization header.
        """
        request = super().__call__(request)
        credentials = f"{self.client_id}:{self.client_secret}"
        encoded = BinaPy(credentials).to("b64").ascii()
        request.headers["Authorization"] = f"Basic {encoded}"
        return request
class ClientSecretPost(BaseClientAuthenticationMethod):
    """Implement `client_secret_post` client authentication method.

    The `client_id` and `client_secret` are inserted in the form body of each
    authenticated request to the AS.

    Args:
        client_id: `client_id` to use.
        client_secret: `client_secret` to use.
    """

    def __init__(self, client_id: str, client_secret: str) -> None:
        super().__init__(client_id)
        self.client_secret = str(client_secret)

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Add the `client_id` and `client_secret` parameters in the request body.

        Args:
            request: a [requests.PreparedRequest][].

        Returns:
            a [requests.PreparedRequest][] with the added client credentials fields.
        """
        request = super().__call__(request)
        form = furl.Query(request.body)
        form.set(
            [
                ("client_id", self.client_id),
                ("client_secret", self.client_secret),
            ]
        )
        request.prepare_body(form.params, files=None)
        return request
class ClientAssertionAuthenticationMethod(BaseClientAuthenticationMethod):
    """Base class for assertion based client authentication methods.

    Args:
        client_id: the client_id to use
        alg: the alg to use to sign generated Client Assertions.
        lifetime: the lifetime to use for generated Client Assertions.
        jti_gen: a function to generate JWT Token Ids (`jti`) for generated Client Assertions.
        aud: the audience value to use. If `None` (default), the endpoint URL will be used.
    """

    def __init__(
        self,
        client_id: str,
        alg: str,
        lifetime: int,
        jti_gen: Callable[[], str],
        aud: str | None = None,
    ) -> None:
        super().__init__(client_id)
        self.alg = alg
        self.lifetime = lifetime
        self.jti_gen = jti_gen
        self.aud = aud

    def client_assertion(self, audience: str) -> str:
        """Generate a Client Assertion for a specific audience.

        Subclasses implement this with their own signing scheme.

        Args:
            audience: the audience to use for the `aud` claim of the generated Client Assertion.

        Returns:
            a Client Assertion, as `str`.
        """
        raise NotImplementedError()  # pragma: no cover

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Add a `client_assertion` field in the request body.

        Args:
            request: a [requests.PreparedRequest][].

        Returns:
            a [requests.PreparedRequest][] with the added `client_assertion` field.
        """
        request = super().__call__(request)
        # default the assertion audience to the endpoint being called
        audience = self.aud or request.url
        assert audience is not None
        assertion = self.client_assertion(audience)
        form = furl.Query(request.body)
        form.set(
            [
                ("client_id", self.client_id),
                ("client_assertion", assertion),
                (
                    "client_assertion_type",
                    "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
                ),
            ]
        )
        request.prepare_body(form.params, files=None)
        return request
class ClientSecretJwt(ClientAssertionAuthenticationMethod):
    """Implement `client_secret_jwt` client authentication method.

    The client generates and signs a client assertion that is symmetrically signed with
    its Client Secret. The assertion is then sent to the AS in a `client_assertion`
    field with each authenticated request.

    Args:
        client_id: the `client_id` to use.
        client_secret: the `client_secret` to use to sign generated Client Assertions.
        alg: the alg to use to sign generated Client Assertions.
        lifetime: the lifetime to use for generated Client Assertions.
        jti_gen: a function to generate JWT Token Ids (`jti`) for generated Client Assertions.
        aud: the audience value to use. If `None` (default), the endpoint URL will be used.
    """

    def __init__(
        self,
        client_id: str,
        client_secret: str,
        alg: str = "HS256",
        lifetime: int = 60,
        jti_gen: Callable[[], Any] = lambda: uuid4(),
        aud: str | None = None,
    ) -> None:
        super().__init__(client_id, alg, lifetime, jti_gen, aud)
        self.client_secret = str(client_secret)

    def client_assertion(self, audience: str) -> str:
        """Generate a symmetrically signed Client Assertion.

        The assertion is signed with the `client_secret` as key and the `alg`
        passed at init time.

        Args:
            audience: the audience to use for the generated Client Assertion.

        Returns:
            a Client Assertion, as `str`.
        """
        now = int(datetime.now().timestamp())
        claims = {
            "iss": self.client_id,
            "sub": self.client_id,
            "aud": audience,
            "iat": now,
            "exp": now + self.lifetime,
            "jti": str(self.jti_gen()),
        }
        signing_key = SymmetricJwk.from_bytes(self.client_secret.encode())
        return str(Jwt.sign(claims=claims, key=signing_key, alg=self.alg))
class PrivateKeyJwt(ClientAssertionAuthenticationMethod):
    """Implement `private_key_jwt` client authentication method.

    With this method, the client generates and sends a client_assertion, that is
    asymmetrically signed with a private key, on each direct request to the Authorization Server.

    Args:
        client_id: the `client_id` to use.
        private_jwk: the private JWK to use to sign generated Client Assertions.
        alg: the alg to use to sign generated Client Assertions.
        lifetime: the lifetime to use for generated Client Assertions.
        jti_gen: a function to generate JWT Token Ids (`jti`) for generated Client Assertions.
        aud: the audience value to use. If `None` (default), the endpoint URL will be used.

    Raises:
        ValueError: if the provided JWK is not a private asymmetric key, if no signing
            alg can be determined, or if the JWK has no Key ID (`kid`).
    """

    def __init__(
        self,
        client_id: str,
        private_jwk: Jwk | dict[str, Any],
        alg: str = "RS256",
        lifetime: int = 60,
        jti_gen: Callable[[], Any] = lambda: uuid4(),
        aud: str | None = None,
    ) -> None:
        if not isinstance(private_jwk, Jwk):
            private_jwk = Jwk(private_jwk)
        # private_key_jwt mandates asymmetric signatures: reject public or symmetric keys
        if not private_jwk.is_private or private_jwk.is_symmetric:
            raise ValueError(
                "Private Key JWT client authentication method uses asymmetric signing thus requires a private key."
            )
        # an `alg` embedded in the JWK takes precedence over the parameter
        alg = private_jwk.alg or alg
        if not alg:
            raise ValueError(
                "An asymmetric signing alg is required, either as part of the private JWK, or passed as parameter."
            )
        kid = private_jwk.get("kid")
        if not kid:
            raise ValueError(
                "Asymmetric signing requires the private JWK to have a Key ID (kid)."
            )
        super().__init__(client_id, alg, lifetime, jti_gen, aud)
        self.private_jwk = private_jwk

    def client_assertion(self, audience: str) -> str:
        """Generate a Client Assertion, asymmetrically signed with `private_jwk` as key.

        Args:
            audience: the audience to use for the generated Client Assertion.

        Returns:
            a Client Assertion.
        """
        iat = int(datetime.now().timestamp())
        exp = iat + self.lifetime
        jti = str(self.jti_gen())
        jwt = Jwt.sign(
            claims={
                "iss": self.client_id,
                "sub": self.client_id,
                "aud": audience,
                "iat": iat,
                "exp": exp,
                "jti": jti,
            },
            key=self.private_jwk,
            alg=self.alg,
        )
        return str(jwt)
class PublicApp(BaseClientAuthenticationMethod):
    """Implement the `none` authentication method for public apps.

    This scheme is used for Public Clients, which do not have any secret credentials. Those only
    send their client_id to the Authorization Server.

    Args:
        client_id: the client_id to use.
    """

    def __init__(self, client_id: str) -> None:
        # delegate to the base class so that client_id is coerced to `str`,
        # consistently with all the other client authentication methods
        super().__init__(client_id)

    def __call__(self, request: requests.PreparedRequest) -> requests.PreparedRequest:
        """Add the `client_id` field in the request body.

        Args:
            request: a [requests.PreparedRequest][].

        Returns:
            a [requests.PreparedRequest][] with the added `client_id` field.
        """
        request = super().__call__(request)
        data = furl.Query(request.body)
        data.set([("client_id", self.client_id)])
        request.prepare_body(data.params, files=None)
        return request
def client_auth_factory(
    auth: (
        requests.auth.AuthBase
        | tuple[str, str]
        | tuple[str, Jwk]
        | tuple[str, dict[str, Any]]
        | str
        | None
    ),
    *,
    client_id: str | None = None,
    client_secret: str | None = None,
    private_key: Jwk | dict[str, Any] | None = None,
    default_auth_handler: (
        type[ClientSecretPost] | type[ClientSecretBasic] | type[ClientSecretJwt]
    ) = ClientSecretPost,
) -> requests.auth.AuthBase:
    """Initialize the appropriate Auth Handler based on the provided parameters.

    This initializes a `ClientAuthenticationMethod` subclass based on the provided parameters.

    Args:
        auth: can be:
            - a `requests.auth.AuthBase` instance (which will be used directly)
            - a tuple of (client_id, client_secret) which will be used to initialize an instance of `default_auth_handler`,
            - a tuple of (client_id, jwk), used to initialize a `PrivateKeyJwk` (`jwk` being an instance of `jwskate.Jwk` or a `dict`),
            - a `client_id`, as `str`,
            - or `None`, to pass `client_id` and other credentials as dedicated parameters, see below.
        client_id: the Client ID to use for this client
        client_secret: the Client Secret to use for this client, if any (for clients using an authentication method based on a secret)
        private_key: the private key to use for private_key_jwt authentication method
        default_auth_handler: if a client_id and client_secret are provided, initialize an instance of this class with those 2 parameters.
            You can choose between `ClientSecretBasic`, `ClientSecretPost`, or `ClientSecretJwt`.

    Returns:
        an Auth Handler that will manage client authentication to the AS Token Endpoint or other backend endpoints.

    Raises:
        ValueError: if both `auth` and dedicated credential parameters are provided,
            or if no client_id can be determined.
        TypeError: if `auth` (or the credential inside it) is of an unsupported type.
    """
    if auth is not None and (
        client_id is not None or client_secret is not None or private_key is not None
    ):
        raise ValueError(
            "Please use either `auth` parameter to provide an authentication method, or use `client_id` and one of `client_secret` or `private_key`."
        )
    if isinstance(auth, str):
        client_id = auth
    elif isinstance(auth, requests.auth.AuthBase):
        return auth
    elif isinstance(auth, tuple) and len(auth) == 2:
        client_id, credential = auth
        if isinstance(credential, (Jwk, dict)):
            private_key = credential
        elif isinstance(credential, str):
            client_secret = credential
        else:
            raise TypeError(
                "This credential type is not supported:", type(credential), credential
            )
    elif auth is not None:
        # fail fast on e.g. a wrong-size tuple, instead of falling through
        # to a misleading "A client_id must be provided." error below
        raise TypeError("This auth type is not supported:", type(auth), auth)
    if client_id is None:
        raise ValueError("A client_id must be provided.")
    if private_key is not None:
        return PrivateKeyJwt(str(client_id), private_key)
    elif client_secret is None:
        return PublicApp(str(client_id))
    else:
        return default_auth_handler(str(client_id), str(client_secret))
from __future__ import annotations
from typing import Any, Iterable
import requests
from jwskate import Jwk, JwkSet, Jwt
from typing_extensions import Literal
from .auth import BearerAuth
from .authorization_request import (
AuthorizationRequest,
AuthorizationResponse,
RequestUriParameterAuthorizationRequest,
)
from .backchannel_authentication import BackChannelAuthenticationResponse
from .client_authentication import ClientSecretPost, PrivateKeyJwt, client_auth_factory
from .device_authorization import DeviceAuthorizationResponse
from .discovery import oidc_discovery_document_url
from .exceptions import (
AccessDenied,
AuthorizationPending,
BackChannelAuthenticationError,
DeviceAuthorizationError,
ExpiredToken,
IntrospectionError,
InvalidBackChannelAuthenticationResponse,
InvalidClient,
InvalidDeviceAuthorizationResponse,
InvalidGrant,
InvalidPushedAuthorizationResponse,
InvalidRequest,
InvalidScope,
InvalidTarget,
InvalidTokenResponse,
RevocationError,
ServerError,
SlowDown,
UnauthorizedClient,
UnknownIntrospectionError,
UnknownTokenEndpointError,
UnsupportedTokenType,
)
from .tokens import BearerToken, IdToken
from .utils import validate_endpoint_uri
class OAuth2Client:
"""An OAuth 2.x client, that can send requests to an OAuth 2.x Authorization Server.
`OAuth2Client` is able to obtain tokens from the Token Endpoint using any of the standardised Grant Types,
and to communicate with the various backend endpoints like the Revocation, Introspection, and UserInfo Endpoint.
To init an OAuth2Client, you only need the url to the Token Endpoint and the Credentials that will be used to authenticate
to that endpoint. Other endpoint urls, such as the can be passed as parameter as well if you intend to use them.
This class is not intended to help with the end-user authentication or any request that goes in a browser.
For authentication requests, see [AuthorizationRequest][requests_oauth2client.authorization_request.AuthorizationRequest].
You may use the helper method `authorization_request()` to generate `AuthorizationRequest`s with the preconfigured
`authorization_endpoint`, `client_id` and `redirect_uri' from this client.
Args:
token_endpoint: the Token Endpoint URI where this client will get access tokens
auth: the authentication handler to use for client authentication on the token endpoint. Can be a [requests.auth.AuthBase][] instance (which will be as-is), or a tuple of `(client_id, client_secret)` which will initialize an instance of [ClientSecretPost][requests_oauth2client.client_authentication.ClientSecretPost], a `(client_id, jwk)` to initialize a [PrivateKeyJwt][requests_oauth2client.client_authentication.PrivateKeyJwt], or a `client_id` which will use [PublicApp][requests_oauth2client.client_authentication.PublicApp] authentication.
client_id: client ID
client_secret: client secret
private_key: private_key to use for client authentication
revocation_endpoint: the Revocation Endpoint URI to use for revoking tokens
introspection_endpoint: the Introspection Endpoint URI to use to get info about tokens
userinfo_endpoint: the Userinfo Endpoint URI to use to get information about the user
authorization_endpoint: the Authorization Endpoint URI for initializing Authorization Requests
redirect_uri: the redirect_uri for this client
backchannel_authentication_endpoint: the BackChannel Authentication URI
device_authorization_endpoint: the Device Authorization Endpoint URI to use to authorize devices
jwks_uri: the JWKS URI to use to obtain the AS public keys
code_challenge_method: challenge method to use for PKCE (should always be 'S256')
session: a requests Session to use when sending HTTP requests. Useful if some extra parameters such as proxy or client certificate must be used to connect to the AS.
**extra_metadata: additional metadata for this client, unused by this class, but may be used by subclasses. Those will be accessible with the `extra_metadata` attribute.
Usage:
```python
client = OAuth2Client(
token_endpoint="https://my.as.local/token",
revocation_endpoint="https://my.as.local/revoke",
auth=("client_id", "client_secret"),
)
# once initialized, a client can send requests to its configured endpoints
cc_token = client.client_credentials(scope="my_scope")
ac_token = client.authorization_code(code="my_code")
client.revoke_access_token(cc_token)
```
"""
exception_classes: dict[str, type[Exception]] = {
"server_error": ServerError,
"invalid_request": InvalidRequest,
"invalid_client": InvalidClient,
"invalid_scope": InvalidScope,
"invalid_target": InvalidTarget,
"invalid_grant": InvalidGrant,
"access_denied": AccessDenied,
"unauthorized_client": UnauthorizedClient,
"authorization_pending": AuthorizationPending,
"slow_down": SlowDown,
"expired_token": ExpiredToken,
"unsupported_token_type": UnsupportedTokenType,
}
token_class: type[BearerToken] = BearerToken
def __init__(
    self,
    token_endpoint: str,
    auth: (
        requests.auth.AuthBase
        | tuple[str, str]
        | tuple[str, Jwk]
        | tuple[str, dict[str, Any]]
        | str
        | None
    ) = None,
    *,
    client_id: str | None = None,
    client_secret: str | None = None,
    private_key: Jwk | dict[str, Any] | None = None,
    revocation_endpoint: str | None = None,
    introspection_endpoint: str | None = None,
    userinfo_endpoint: str | None = None,
    authorization_endpoint: str | None = None,
    redirect_uri: str | None = None,
    backchannel_authentication_endpoint: str | None = None,
    device_authorization_endpoint: str | None = None,
    pushed_authorization_request_endpoint: str | None = None,
    jwks_uri: str | None = None,
    authorization_server_jwks: JwkSet | dict[str, Any] | None = None,
    issuer: str | None = None,
    id_token_signed_response_alg: str | None = "RS256",
    id_token_encrypted_response_alg: str | None = None,
    id_token_decryption_key: Jwk | dict[str, Any] | None = None,
    code_challenge_method: str = "S256",
    authorization_response_iss_parameter_supported: bool = False,
    session: requests.Session | None = None,
    **extra_metadata: Any,
):
    """Initialize an `OAuth2Client`.

    See the class docstring for the meaning of each parameter.

    Raises:
        ValueError: if `authorization_response_iss_parameter_supported` is `True`
            but no `issuer` is provided.
    """
    # validating the issuer returned in authorization responses requires
    # knowing the expected issuer value up-front
    if authorization_response_iss_parameter_supported and not issuer:
        raise ValueError(
            "If the Authorization Server supports Issuer Identification, "
            "as specified by `authorization_response_iss_parameter_supported=True`, "
            "then you must specify the expected `issuer` value with parameter `issuer`."
        )
    # endpoint URLs are normalized to `str`; optional ones stay `None` when not configured
    self.token_endpoint = str(token_endpoint)
    self.revocation_endpoint = str(revocation_endpoint) if revocation_endpoint else None
    self.introspection_endpoint = (
        str(introspection_endpoint) if introspection_endpoint else None
    )
    self.userinfo_endpoint = str(userinfo_endpoint) if userinfo_endpoint else None
    self.authorization_endpoint = (
        str(authorization_endpoint) if authorization_endpoint else None
    )
    self.redirect_uri = str(redirect_uri) if redirect_uri else None
    self.backchannel_authentication_endpoint = (
        str(backchannel_authentication_endpoint)
        if backchannel_authentication_endpoint
        else None
    )
    self.device_authorization_endpoint = (
        str(device_authorization_endpoint) if device_authorization_endpoint else None
    )
    self.pushed_authorization_request_endpoint = (
        str(pushed_authorization_request_endpoint)
        if pushed_authorization_request_endpoint
        else None
    )
    self.jwks_uri = str(jwks_uri) if jwks_uri else None
    # the AS keys may be supplied as a plain dict; wrap them in a JwkSet
    self.authorization_server_jwks = (
        JwkSet(authorization_server_jwks) if authorization_server_jwks else None
    )
    self.issuer = str(issuer) if issuer else None
    self.session = session or requests.Session()
    # build the client authentication handler from whichever credentials were supplied
    self.auth = client_auth_factory(
        auth,
        client_id=client_id,
        client_secret=client_secret,
        private_key=private_key,
        default_auth_handler=ClientSecretPost,
    )
    self.id_token_signed_response_alg = id_token_signed_response_alg
    self.id_token_encrypted_response_alg = id_token_encrypted_response_alg
    # the ID Token decryption key may be supplied as a plain dict; wrap it in a Jwk
    self.id_token_decryption_key = (
        Jwk(id_token_decryption_key) if id_token_decryption_key else None
    )
    self.code_challenge_method = code_challenge_method
    self.authorization_response_iss_parameter_supported = (
        authorization_response_iss_parameter_supported
    )
    # unused by this class, but accessible to subclasses via `extra_metadata`
    self.extra_metadata = extra_metadata
@property
def client_id(self) -> str:
    """Client ID, as exposed by this client's authentication handler."""
    auth_handler = self.auth
    if hasattr(auth_handler, "client_id"):
        return auth_handler.client_id  # type: ignore[no-any-return]
    raise AttributeError(  # pragma: no cover
        "This client uses a custom authentication method without client_id."
    )
@property
def client_secret(self) -> str | None:
    """Client Secret, or `None` when the authentication handler has none."""
    auth_handler = self.auth
    if hasattr(auth_handler, "client_secret"):
        return auth_handler.client_secret  # type: ignore[no-any-return]
    return None
@property
def client_jwks(self) -> JwkSet:
    """A `JwkSet` containing the public keys for this client.

    Keys are:
    - the public key for client assertion signature verification (if using private_key_jwt)
    - the ID Token encryption key
    """
    public_keys = JwkSet()
    if isinstance(self.auth, PrivateKeyJwt):
        public_keys.add_jwk(self.auth.private_jwk.public_jwk().with_usage_parameters())
    decryption_key = self.id_token_decryption_key
    if decryption_key:
        public_keys.add_jwk(decryption_key.public_jwk().with_usage_parameters())
    return public_keys
def token_request(
    self, data: dict[str, Any], timeout: int = 10, **requests_kwargs: Any
) -> BearerToken:
    """Send a request to the token endpoint.

    Authentication will be added automatically based on the defined `auth` for this client.

    Args:
        data: parameters to send to the token endpoint. Items with a None or empty value will not be sent in the request.
        timeout: a timeout value for the call
        **requests_kwargs: additional parameters for requests.post()

    Returns:
        the token endpoint response, as [`BearerToken`][requests_oauth2client.tokens.BearerToken] instance.
    """
    token_endpoint = self._require_endpoint("token_endpoint")
    # drop None/empty extra kwargs so they are not forwarded to requests.post()
    cleaned_kwargs = {
        key: value
        for key, value in requests_kwargs.items()
        if value is not None and value != ""
    }
    response = self.session.post(
        token_endpoint,
        auth=self.auth,
        data=data,
        timeout=timeout,
        **cleaned_kwargs,
    )
    if not response.ok:
        return self.on_token_error(response)
    return self.parse_token_response(response)
def parse_token_response(self, response: requests.Response) -> BearerToken:
    """Parse a Response returned by the Token Endpoint.

    Invoked by [token_request][requests_oauth2client.client.OAuth2Client.token_request] to parse
    responses returned by the Token Endpoint. Those responses contain an `access_token` and
    additional attributes. If parsing fails, the response is handed to `on_token_error()`.

    Args:
        response: the [Response][requests.Response] returned by the Token Endpoint.

    Returns:
        a [`BearerToken`][requests_oauth2client.tokens.BearerToken] based on the response contents.
    """
    try:
        return self.token_class(**response.json())
    except Exception as response_class_exc:
        # maybe the response is an error document: let the error handler try it,
        # chaining the parsing failure if it raises too
        try:
            return self.on_token_error(response)
        except Exception as token_error_exc:
            raise token_error_exc from response_class_exc
def on_token_error(self, response: requests.Response) -> BearerToken:
    """Error handler for `token_request()`.

    Invoked by [token_request][requests_oauth2client.client.OAuth2Client.token_request] when the
    Token Endpoint returns an error.

    Args:
        response: the [Response][requests.Response] returned by the Token Endpoint.

    Returns:
        nothing, and raises an exception instead. But a subclass may return a
        [`BearerToken`][requests_oauth2client.tokens.BearerToken] to implement a default behaviour if needed.
    """
    try:
        payload = response.json()
        error_code = payload["error"]
        description = payload.get("error_description")
        uri = payload.get("error_uri")
        # map the standard error code to a specific exception class
        exc_class = self.exception_classes.get(error_code, UnknownTokenEndpointError)
        exception = exc_class(response, error_code, description, uri)
    except Exception as exc:
        # response is not a well-formed OAuth 2.0 error document
        raise InvalidTokenResponse(response) from exc
    raise exception
def client_credentials(
    self,
    scope: str | Iterable[str] | None = None,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a request to the token endpoint using the `client_credentials` grant.

    Args:
        scope: the scope to send with the request. Can be a str, or an iterable of str.
        requests_kwargs: additional parameters for the call to requests
        **token_kwargs: additional parameters for the token endpoint, alongside `grant_type`.
            Common parameters to pass that way include `audience`, `resource`, etc.

    Returns:
        a `BearerToken`

    Raises:
        ValueError: if `scope` is neither a str nor an iterable of str
    """
    requests_kwargs = requests_kwargs or {}
    # an iterable of scopes is flattened to the space-separated form
    if scope is not None and not isinstance(scope, str):
        try:
            scope = " ".join(scope)
        except Exception as exc:
            raise ValueError("Unsupported scope value") from exc
    data = dict(grant_type="client_credentials", scope=scope, **token_kwargs)
    return self.token_request(data, **requests_kwargs)
def authorization_code(
    self,
    code: str | AuthorizationResponse,
    validate: bool = True,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a request to the token endpoint with the `authorization_code` grant.

    Args:
        code: an authorization code or an `AuthorizationResponse` to exchange for tokens
        validate: if `True`, validate the received ID Token (this works only if `code` is an AuthorizationResponse)
        requests_kwargs: additional parameters for the call to requests
        **token_kwargs: additional parameters for the token endpoint, alongside `grant_type`, `code`, etc.

    Returns:
        a `BearerToken`
    """
    azr: AuthorizationResponse | None = None
    if isinstance(code, AuthorizationResponse):
        # carry over the PKCE verifier and redirect_uri from the authorization
        # response, unless explicitly overridden in token_kwargs
        token_kwargs.setdefault("code_verifier", code.code_verifier)
        token_kwargs.setdefault("redirect_uri", code.redirect_uri)
        azr = code
        code = code.code
    data = dict(grant_type="authorization_code", code=code, **token_kwargs)
    token = self.token_request(data, **(requests_kwargs or {}))
    if validate and token.id_token and isinstance(azr, AuthorizationResponse):
        token.validate_id_token(self, azr)
    return token
def refresh_token(
    self,
    refresh_token: str | BearerToken,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a request to the token endpoint with the `refresh_token` grant.

    Args:
        refresh_token: a refresh_token, as a string, or as a `BearerToken`. That `BearerToken` must have a `refresh_token`.
        requests_kwargs: additional parameters for the call to `requests`
        **token_kwargs: additional parameters for the token endpoint, alongside `grant_type`, `refresh_token`, etc.

    Returns:
        a `BearerToken`

    Raises:
        ValueError: if a `BearerToken` without a `refresh_token` is provided
    """
    if isinstance(refresh_token, BearerToken):
        token_value = refresh_token.refresh_token
        if token_value is None or not isinstance(token_value, str):
            raise ValueError("This BearerToken doesn't have a refresh_token")
        refresh_token = token_value
    data = dict(grant_type="refresh_token", refresh_token=refresh_token, **token_kwargs)
    return self.token_request(data, **(requests_kwargs or {}))
def device_code(
    self,
    device_code: str | DeviceAuthorizationResponse,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a request to the token endpoint using the Device Code grant.

    The grant_type is `urn:ietf:params:oauth:grant-type:device_code`.
    This needs a Device Code, or a `DeviceAuthorizationResponse` as parameter.

    Args:
        device_code: a device code, or a `DeviceAuthorizationResponse`
        requests_kwargs: additional parameters for the call to requests
        **token_kwargs: additional parameters for the token endpoint, alongside `grant_type`, `device_code`, etc.

    Returns:
        a `BearerToken`

    Raises:
        ValueError: if a `DeviceAuthorizationResponse` without a `device_code` is provided
    """
    if isinstance(device_code, DeviceAuthorizationResponse):
        code_value = device_code.device_code
        if code_value is None or not isinstance(code_value, str):
            raise ValueError("This DeviceAuthorizationResponse doesn't have a device_code")
        device_code = code_value
    data = dict(
        grant_type="urn:ietf:params:oauth:grant-type:device_code",
        device_code=device_code,
        **token_kwargs,
    )
    return self.token_request(data, **(requests_kwargs or {}))
def ciba(
    self,
    auth_req_id: str | BackChannelAuthenticationResponse,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a CIBA request to the Token Endpoint.

    A CIBA request is a Token Request using the `urn:openid:params:grant-type:ciba` grant.

    Args:
        auth_req_id: an authentication request ID, as returned by the AS
        requests_kwargs: additional parameters for the call to requests
        **token_kwargs: additional parameters for the token endpoint, alongside `grant_type`, `auth_req_id`, etc.

    Returns:
        a `BearerToken`

    Raises:
        ValueError: if a `BackChannelAuthenticationResponse` without an `auth_req_id` is provided
    """
    if isinstance(auth_req_id, BackChannelAuthenticationResponse):
        request_id = auth_req_id.auth_req_id
        if request_id is None or not isinstance(request_id, str):
            raise ValueError(
                "This BackChannelAuthenticationResponse doesn't have an auth_req_id"
            )
        auth_req_id = request_id
    data = dict(
        grant_type="urn:openid:params:grant-type:ciba",
        auth_req_id=auth_req_id,
        **token_kwargs,
    )
    return self.token_request(data, **(requests_kwargs or {}))
def token_exchange(
    self,
    subject_token: str | BearerToken | IdToken,
    subject_token_type: str | None = None,
    actor_token: None | str | BearerToken | IdToken = None,
    actor_token_type: str | None = None,
    requested_token_type: str | None = None,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a Token Exchange request.

    A Token Exchange request is actually a request to the Token Endpoint with a grant_type
    `urn:ietf:params:oauth:grant-type:token-exchange`.

    Args:
        subject_token: the subject token to exchange for a new token.
        subject_token_type: a token type identifier for the subject_token, mandatory if it cannot be guessed based
            on `type(subject_token)`.
        actor_token: the actor token to include in the request, if any.
        actor_token_type: a token type identifier for the actor_token, mandatory if it cannot be guessed based
            on `type(actor_token)`.
        requested_token_type: a token type identifier for the requested token.
        requests_kwargs: additional parameters to pass to the underlying `requests.post()` call.
        **token_kwargs: additional parameters to include in the request body.

    Returns:
        a `BearerToken` as returned by the Authorization Server.

    Raises:
        TypeError: if a token type cannot be guessed and no explicit type was provided.
    """
    requests_kwargs = requests_kwargs or {}
    try:
        subject_token_type = self.get_token_type(subject_token_type, subject_token)
    except ValueError as exc:
        # chain the original error so the failed type-guessing stays visible (PEP 3134)
        raise TypeError(
            "Cannot determine the kind of 'subject_token' you provided. "
            "Please specify a 'subject_token_type'."
        ) from exc
    if actor_token:  # pragma: no branch
        try:
            actor_token_type = self.get_token_type(actor_token_type, actor_token)
        except ValueError as exc:
            raise TypeError(
                "Cannot determine the kind of 'actor_token' you provided. "
                "Please specify an 'actor_token_type'."
            ) from exc
    data = dict(
        grant_type="urn:ietf:params:oauth:grant-type:token-exchange",
        subject_token=subject_token,
        subject_token_type=subject_token_type,
        actor_token=actor_token,
        actor_token_type=actor_token_type,
        requested_token_type=requested_token_type,
        **token_kwargs,
    )
    return self.token_request(data, **requests_kwargs)
def jwt_bearer(
    self,
    assertion: Jwt | str,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a request using a JWT as authorization grant.

    This grant type is defined in [RFC7523 section 2.1](https://www.rfc-editor.org/rfc/rfc7523.html#section-2.1).

    Args:
        assertion: a JWT (as an instance of `jwskate.Jwt` or as a `str`) to use as authorization grant.
        requests_kwargs: additional parameters to pass to the underlying `requests.post()` call.
        **token_kwargs: additional parameters to include in the request body.

    Returns:
        a `BearerToken` as returned by the Authorization Server.
    """
    if not isinstance(assertion, Jwt):
        assertion = Jwt(assertion)
    data = dict(
        grant_type="urn:ietf:params:oauth:grant-type:jwt-bearer",
        assertion=assertion,
        **token_kwargs,
    )
    return self.token_request(data, **(requests_kwargs or {}))
def resource_owner_password(
    self,
    username: str,
    password: str,
    requests_kwargs: dict[str, Any] | None = None,
    **token_kwargs: Any,
) -> BearerToken:
    """Send a request using the Resource Owner Password Grant.

    This Grant Type is deprecated and should only be used when there is no other choice.

    Args:
        username: the resource owner user name
        password: the resource owner password
        requests_kwargs: additional parameters to pass to the underlying `requests.post()` call.
        **token_kwargs: additional parameters to include in the request body.

    Returns:
        a `BearerToken` as returned by the Authorization Server
    """
    data = dict(
        grant_type="password",
        username=username,
        password=password,
        **token_kwargs,
    )
    return self.token_request(data, **(requests_kwargs or {}))
def authorization_request(
    self,
    scope: None | str | Iterable[str] = "openid",
    response_type: str = "code",
    redirect_uri: str | None = None,
    state: str | Literal[True] | None = True,
    nonce: str | Literal[True] | None = True,
    code_verifier: str | None = None,
    **kwargs: Any,
) -> AuthorizationRequest:
    """Generate an Authorization Request for this client.

    The request is preconfigured with this client's `authorization_endpoint`,
    `client_id`, `issuer` and `code_challenge_method`.

    Args:
        scope: the scope to use
        response_type: the response_type to use; only "code" is supported
        redirect_uri: the redirect_uri to include in the request. By default, the redirect_uri defined at init time is used.
        state: the state parameter to use. Leave default to generate a random value.
        nonce: a nonce. Leave default to generate a random value.
        code_verifier: the PKCE code verifier to use. Leave default to generate a random value.
        **kwargs: additional parameters to include in the auth request

    Returns:
        an AuthorizationRequest with the supplied parameters

    Raises:
        AttributeError: if no redirect_uri is configured or supplied
        ValueError: if response_type is anything other than "code"
    """
    authorization_endpoint = self._require_endpoint("authorization_endpoint")
    chosen_redirect_uri = redirect_uri or self.redirect_uri
    if not chosen_redirect_uri:
        raise AttributeError(
            "No 'redirect_uri' defined for this client. "
            "You must either pass a redirect_uri as parameter to this method, "
            "or include a redirect_uri when initializing your OAuth2Client."
        )
    if response_type != "code":
        raise ValueError("Only response_type=code is supported.")
    return AuthorizationRequest(
        authorization_endpoint=authorization_endpoint,
        client_id=self.client_id,
        redirect_uri=chosen_redirect_uri,
        issuer=self.issuer,
        response_type=response_type,
        scope=scope,
        state=state,
        nonce=nonce,
        code_verifier=code_verifier,
        code_challenge_method=self.code_challenge_method,
        **kwargs,
    )
def pushed_authorization_request(
self, authorization_request: AuthorizationRequest
) -> RequestUriParameterAuthorizationRequest:
"""Send a Pushed Authorization Request.
This sends a request to the Pushed Authorization Request Endpoint, and returns a
`RequestUriParameterAuthorizationRequest` initialized with the AS response.
Args:
authorization_request: the authorization request to send
Returns:
the `RequestUriParameterAuthorizationRequest` initialized based on the AS response
"""
pushed_authorization_request_endpoint = self._require_endpoint(
"pushed_authorization_request_endpoint"
)
response = self.session.post(
pushed_authorization_request_endpoint,
data=authorization_request.args,
auth=self.auth,
)
if not response.ok:
return self.on_pushed_authorization_request_error(response)
response_json = response.json()
request_uri = response_json.get("request_uri")
expires_in = response_json.get("expires_in")
return RequestUriParameterAuthorizationRequest(
authorization_endpoint=authorization_request.authorization_endpoint,
client_id=authorization_request.client_id,
request_uri=request_uri,
expires_in=expires_in,
)
def on_pushed_authorization_request_error(
self, response: requests.Response
) -> RequestUriParameterAuthorizationRequest:
"""Error Handler for Pushed Authorization Endpoint errors.
Args:
response: the HTTP response as returned by the AS PAR endpoint.
Returns:
a RequestUriParameterAuthorizationRequest, if the error is recoverable
Raises:
EndpointError: a subclass of this error depending on the error returned by the AS
InvalidPushedAuthorizationResponse: if the returned response is not following the specifications
UnknownTokenEndpointError: for unknown/unhandled errors
"""
try:
data = response.json()
error = data["error"]
error_description = data.get("error_description")
error_uri = data.get("error_uri")
exception_class = self.exception_classes.get(error, UnknownTokenEndpointError)
exception = exception_class(response, error, error_description, error_uri)
except Exception as exc:
raise InvalidPushedAuthorizationResponse(response) from exc
raise exception
def userinfo(self, access_token: BearerToken | str) -> Any:
"""Call the UserInfo endpoint.
This sends a request to the UserInfo endpoint, with the specified access_token, and returns the parsed result.
Args:
access_token: the access token to use
Returns:
the [Response][requests.Response] returned by the userinfo endpoint.
"""
userinfo_endpoint = self._require_endpoint("userinfo_endpoint")
response = self.session.post(userinfo_endpoint, auth=BearerAuth(access_token))
return self.parse_userinfo_response(response)
def parse_userinfo_response(self, resp: requests.Response) -> Any:
"""Parse the response obtained by `userinfo()`.
Invoked by [userinfo()][requests_oauth2client.client.OAuth2Client.userinfo] to parse the response from the UserInfo endpoint, this will extract and return its JSON
content.
Args:
resp: a [Response][requests.Response] returned from the UserInfo endpoint.
Returns:
the parsed JSON content from this response.
"""
return resp.json()
@classmethod
def get_token_type(
cls,
token_type: str | None = None,
token: None | str | BearerToken | IdToken = None,
) -> str:
"""Get standardised token type identifiers.
Return a standardised token type identifier, based on a short `token_type`
hint and/or a token value.
Args:
token_type: a token_type hint, as `str`. May be "access_token", "refresh_token" or "id_token" (optional)
token: a token value, as an instance of `BearerToken` or IdToken, or as a `str`.
Returns:
the token_type as defined in the Token Exchange RFC8693.
"""
if not (token_type or token):
raise ValueError(
"Cannot determine type of an empty token without a token_type hint"
)
if token_type is None:
if isinstance(token, str):
raise ValueError(
"Cannot determine the type of provided token when it is a bare str. "
"Please specify a token_type."
)
elif isinstance(token, BearerToken):
return "urn:ietf:params:oauth:token-type:access_token"
elif isinstance(token, IdToken):
return "urn:ietf:params:oauth:token-type:id_token"
else:
raise TypeError(
"Unexpected type of token, please provide a string or a BearerToken or an IdToken.",
type(token),
)
elif token_type == "access_token":
if token is not None and not isinstance(token, (str, BearerToken)):
raise TypeError(
"The supplied token is not a BearerToken or a string representation of it.",
type(token),
)
return "urn:ietf:params:oauth:token-type:access_token"
elif token_type == "refresh_token":
if token is not None and isinstance(token, BearerToken) and not token.refresh_token:
raise ValueError("The supplied BearerToken doesn't have a refresh_token.")
return "urn:ietf:params:oauth:token-type:refresh_token"
elif token_type == "id_token":
if token is not None and not isinstance(token, (str, IdToken)):
raise TypeError(
"The supplied token is not an IdToken or a string representation of it.",
type(token),
)
return "urn:ietf:params:oauth:token-type:id_token"
else:
return {
"saml1": "urn:ietf:params:oauth:token-type:saml1",
"saml2": "urn:ietf:params:oauth:token-type:saml2",
"jwt": "urn:ietf:params:oauth:token-type:jwt",
}.get(token_type, token_type)
    def revoke_access_token(
        self,
        access_token: BearerToken | str,
        requests_kwargs: dict[str, Any] | None = None,
        **revoke_kwargs: Any,
    ) -> bool:
        """Send a request to the Revocation Endpoint to revoke an access token.

        Convenience wrapper around `revoke_token()` with `token_type_hint="access_token"`.

        Args:
            access_token: the access token to revoke
            requests_kwargs: additional parameters for the underlying requests.post() call
            **revoke_kwargs: additional parameters to pass to the revocation endpoint

        Returns:
            `True` if the revocation request is successful, `False` if this client has
            no configured revocation endpoint.
        """
        return self.revoke_token(
            access_token,
            token_type_hint="access_token",
            requests_kwargs=requests_kwargs,
            **revoke_kwargs,
        )
def revoke_refresh_token(
self,
refresh_token: str | BearerToken,
requests_kwargs: dict[str, Any] | None = None,
**revoke_kwargs: Any,
) -> bool:
"""Send a request to the Revocation Endpoint to revoke a refresh token.
Args:
refresh_token: the refresh token to revoke.
requests_kwargs: additional parameters to pass to the revocation endpoint.
**revoke_kwargs: additional parameters to pass to the revocation endpoint.
Returns:
`True` if the revocation request is successful, `False` if this client has no configured revocation endpoint.
"""
if isinstance(refresh_token, BearerToken):
if refresh_token.refresh_token is None:
raise ValueError("The supplied BearerToken doesn't have a refresh token.")
refresh_token = refresh_token.refresh_token
return self.revoke_token(
refresh_token,
token_type_hint="refresh_token",
requests_kwargs=requests_kwargs,
**revoke_kwargs,
)
def revoke_token(
self,
token: str | BearerToken,
token_type_hint: str | None = None,
requests_kwargs: dict[str, Any] | None = None,
**revoke_kwargs: Any,
) -> bool:
"""Send a Token Revocation request.
By default, authentication will be the same than the one used for the Token Endpoint.
Args:
token: the token to revoke.
token_type_hint: a token_type_hint to send to the revocation endpoint.
requests_kwargs: additional parameters to the underling call to requests.post()
**revoke_kwargs: additional parameters to send to the revocation endpoint.
Returns:
`True` if the revocation succeeds,
`False` if no revocation endpoint is present or a non-standardised error is returned.
"""
if not self.revocation_endpoint:
return False
requests_kwargs = requests_kwargs or {}
if token_type_hint == "refresh_token" and isinstance(token, BearerToken):
if token.refresh_token is None:
raise ValueError("The supplied BearerToken doesn't have a refresh token.")
token = token.refresh_token
data = dict(revoke_kwargs, token=str(token))
if token_type_hint:
data["token_type_hint"] = token_type_hint
response = self.session.post(
self.revocation_endpoint,
data=data,
auth=self.auth,
**requests_kwargs,
)
if response.ok:
return True
return self.on_revocation_error(response)
def on_revocation_error(self, response: requests.Response) -> bool:
"""Error handler for `revoke_token()`.
Invoked by [revoke_token()][requests_oauth2client.client.OAuth2Client.revoke_token] when the revocation endpoint returns an error.
Args:
response: the [Response][requests.Response] as returned by the Revocation Endpoint
Returns:
`False` to signal that an error occurred. May raise exceptions instead depending on the revocation response.
"""
try:
data = response.json()
error = data["error"]
error_description = data.get("error_description")
error_uri = data.get("error_uri")
exception_class = self.exception_classes.get(error, RevocationError)
exception = exception_class(error, error_description, error_uri)
except Exception:
return False
raise exception
def introspect_token(
self,
token: str | BearerToken,
token_type_hint: str | None = None,
requests_kwargs: dict[str, Any] | None = None,
**introspect_kwargs: Any,
) -> Any:
"""Send a request to the configured Introspection Endpoint.
Args:
token_type_hint: the token_type_hint to include in the request.
requests_kwargs: additional parameters to the underling call to requests.post()
**introspect_kwargs: additional parameters to send to the introspection endpoint.
Returns:
the response as returned by the Introspection Endpoint.
"""
introspection_endpoint = self._require_endpoint("introspection_endpoint")
requests_kwargs = requests_kwargs or {}
if token_type_hint == "refresh_token" and isinstance(token, BearerToken):
if token.refresh_token is None:
raise ValueError("The supplied BearerToken doesn't have a refresh token.")
token = token.refresh_token
data = dict(introspect_kwargs, token=str(token))
if token_type_hint:
data["token_type_hint"] = token_type_hint
response = self.session.post(
introspection_endpoint,
data=data,
auth=self.auth,
**requests_kwargs,
)
if response.ok:
return self.parse_introspection_response(response)
return self.on_introspection_error(response)
def parse_introspection_response(self, response: requests.Response) -> Any:
"""Parse Token Introspection Responses received by `introspect_token()`.
Invoked by [introspect_token()][requests_oauth2client.client.OAuth2Client.introspect_token] to parse the returned response.
This decodes the JSON content if possible, otherwise it returns the response as a string.
Args:
response: the [Response][requests.Response] as returned by the Introspection Endpoint.
Returns:
the decoded JSON content, or a `str` with the content.
"""
try:
return response.json()
except ValueError:
return response.text
def on_introspection_error(self, response: requests.Response) -> Any:
"""Error handler for `introspect_token()`.
Invoked by [introspect_token()][requests_oauth2client.client.OAuth2Client.introspect_token] to parse the returned response in the case an error is returned.
Args:
response: the response as returned by the Introspection Endpoint.
Returns:
usually raises exeptions. A subclass can return a default response instead.
"""
try:
data = response.json()
error = data["error"]
error_description = data.get("error_description")
error_uri = data.get("error_uri")
exception_class = self.exception_classes.get(error, IntrospectionError)
exception = exception_class(error, error_description, error_uri)
except Exception as exc:
raise UnknownIntrospectionError(response) from exc
raise exception
def backchannel_authentication_request(
self,
scope: None | str | Iterable[str] = "openid",
client_notification_token: str | None = None,
acr_values: None | str | Iterable[str] = None,
login_hint_token: str | None = None,
id_token_hint: str | None = None,
login_hint: str | None = None,
binding_message: str | None = None,
user_code: str | None = None,
requested_expiry: int | None = None,
private_jwk: Jwk | dict[str, Any] | None = None,
alg: str | None = None,
requests_kwargs: dict[str, Any] | None = None,
**ciba_kwargs: Any,
) -> BackChannelAuthenticationResponse:
"""Send a CIBA Authentication Request.
Args:
scope: the scope to include in the request.
client_notification_token: the Client Notification Token to include in the request.
acr_values: the acr values to include in the request.
login_hint_token: the Login Hint Token to include in the request.
id_token_hint: the ID Token Hint to include in the request.
login_hint: the Login Hint to include in the request.
binding_message: the Binding Message to include in the request.
user_code: the User Code to include in the request
requested_expiry: the Requested Expiry, in seconds, to include in the request.
private_jwk: the JWK to use to sign the request (optional)
alg: the alg to use to sign the request, if the provided JWK does not include an "alg" parameter.
requests_kwargs: additional parameters for
**ciba_kwargs: additional parameters to include in the request.
Returns:
a BackChannelAuthenticationResponse as returned by AS
"""
backchannel_authentication_endpoint = self._require_endpoint(
"backchannel_authentication_endpoint"
)
if not (login_hint or login_hint_token or id_token_hint):
raise ValueError(
"One of `login_hint`, `login_hint_token` or `ìd_token_hint` must be provided"
)
if (
(login_hint_token and id_token_hint)
or (login_hint and id_token_hint)
or (login_hint_token and login_hint)
):
raise ValueError(
"Only one of `login_hint`, `login_hint_token` or `ìd_token_hint` must be provided"
)
requests_kwargs = requests_kwargs or {}
if scope is not None and not isinstance(scope, str):
try:
scope = " ".join(scope)
except Exception as exc:
raise ValueError("Unsupported `scope` value") from exc
if acr_values is not None and not isinstance(acr_values, str):
try:
acr_values = " ".join(acr_values)
except Exception as exc:
raise ValueError("Unsupported `acr_values`") from exc
data = dict(
ciba_kwargs,
scope=scope,
client_notification_token=client_notification_token,
acr_values=acr_values,
login_hint_token=login_hint_token,
id_token_hint=id_token_hint,
login_hint=login_hint,
binding_message=binding_message,
user_code=user_code,
requested_expiry=requested_expiry,
)
if private_jwk is not None:
data = {"request": str(Jwt.sign(data, key=private_jwk, alg=alg))}
response = self.session.post(
backchannel_authentication_endpoint,
data=data,
auth=self.auth,
**requests_kwargs,
)
if response.ok:
return self.parse_backchannel_authentication_response(response)
return self.on_backchannel_authentication_error(response)
def parse_backchannel_authentication_response(
self, response: requests.Response
) -> BackChannelAuthenticationResponse:
"""Parse a response received by `backchannel_authentication_request()`.
Invoked by [backchannel_authentication_request()][requests_oauth2client.client.OAuth2Client.backchannel_authentication_request] to parse the response
returned by the BackChannel Authentication Endpoint.
Args:
response: the response returned by the BackChannel Authentication Endpoint.
Returns:
a `BackChannelAuthenticationResponse`
"""
try:
return BackChannelAuthenticationResponse(**response.json())
except TypeError as exc:
raise InvalidBackChannelAuthenticationResponse(response) from exc
def on_backchannel_authentication_error(
self, response: requests.Response
) -> BackChannelAuthenticationResponse:
"""Error handler for `backchannel_authentication_request()`.
Invoked by [backchannel_authentication_request()][requests_oauth2client.client.OAuth2Client.backchannel_authentication_request] to parse the response
returned by the BackChannel Authentication Endpoint, when it is an error.
Args:
response: the response returned by the BackChannel Authentication Endpoint.
Returns:
usually raises an exception. But a subclass can return a default response instead.
"""
try:
data = response.json()
error = data["error"]
error_description = data.get("error_description")
error_uri = data.get("error_uri")
exception_class = self.exception_classes.get(error, BackChannelAuthenticationError)
exception = exception_class(error, error_description, error_uri)
except Exception as exc:
raise InvalidBackChannelAuthenticationResponse(response) from exc
raise exception
def authorize_device(self, **data: Any) -> DeviceAuthorizationResponse:
"""Send a Device Authorization Request.
Args:
**data: additional data to send to the Device Authorization Endpoint
Returns:
a Device Authorization Response
"""
device_authorization_endpoint = self._require_endpoint("device_authorization_endpoint")
response = self.session.post(device_authorization_endpoint, data=data, auth=self.auth)
if response.ok:
return self.parse_device_authorization_response(response)
return self.on_device_authorization_error(response)
def parse_device_authorization_response(
self, response: requests.Response
) -> DeviceAuthorizationResponse:
"""Parse a Device Authorization Response received by `authorize_device()`.
Invoked by [authorize_device()][requests_oauth2client.client.OAuth2Client.authorize_device] to parse the response returned by the Device Authorization Endpoint.
Args:
response: the response returned by the Device Authorization Endpoint.
Returns:
a `DeviceAuthorizationResponse` as returned by AS
"""
device_authorization_response = DeviceAuthorizationResponse(**response.json())
return device_authorization_response
def on_device_authorization_error(
self, response: requests.Response
) -> DeviceAuthorizationResponse:
"""Error handler for `authorize_device()`.
Invoked by [authorize_device()][requests_oauth2client.client.OAuth2Client.authorize_device] to parse the response returned by the Device Authorization Endpoint, when that response is an error.
Args:
response: the response returned by the Device Authorization Endpoint.
Returns:
usually raises an Exception. But a subclass may return a default response instead.
"""
try:
data = response.json()
error = data["error"]
error_description = data.get("error_description")
error_uri = data.get("error_uri")
exception_class = self.exception_classes.get(error, DeviceAuthorizationError)
exception = exception_class(response, error, error_description, error_uri)
except Exception as exc:
raise InvalidDeviceAuthorizationResponse(response) from exc
raise exception
def update_authorization_server_public_keys(self) -> JwkSet:
"""Update the cached AS public keys by retrieving them from its `jwks_uri`.
Public keys are returned by this method, as a [JwkSet][jwskate.JwkSet].
They are also available in attribute `authorization_server_jwks`.
Returns:
the retrieved public keys
Raises:
ValueError: if no jwks_uri is configured
"""
jwks_uri = self._require_endpoint("jwks_uri")
jwks = self.session.get(jwks_uri, auth=None).json()
self.authorization_server_jwks = JwkSet(jwks)
return self.authorization_server_jwks
@classmethod
def from_discovery_endpoint(
cls,
url: str | None = None,
issuer: str | None = None,
auth: requests.auth.AuthBase | tuple[str, str] | str | None = None,
client_id: str | None = None,
client_secret: str | None = None,
private_key: Jwk | dict[str, Any] | None = None,
session: requests.Session | None = None,
**kwargs: Any,
) -> OAuth2Client:
"""Initialise an OAuth2Client based on Authorization Server Metadata.
This will retrieve the standardised metadata document available at `url`, and will extract all Endpoint Uris
from that document, will fetch the current public keys from its `jwks_uri`, then will initialize an OAuth2Client
based on those endpoints.
Args:
url: the url where the server metadata will be retrieved
auth: the authentication handler to use for client authentication
client_id: client ID
client_secret: client secret to use to authenticate the client
private_key: private key to sign client assertions
session: a requests Session to use to retrieve the document and initialise the client with
issuer: if an issuer is given, check that it matches the one from the retrieved document
Returns:
an OAuth2Client with endpoint initialized based on the obtained metadata
Raises:
ValueError: if neither `url` or `issuer` are suitable urls.
requests.HTTPError: if an error happens while fetching the documents
"""
if url is None and issuer is not None:
url = oidc_discovery_document_url(issuer)
if url is None:
raise ValueError("Please specify at least one of `issuer` or `url`")
validate_endpoint_uri(url, path=False)
session = session or requests.Session()
discovery = session.get(url).json()
jwks_uri = discovery.get("jwks_uri")
if jwks_uri:
jwks = JwkSet(session.get(jwks_uri).json())
return cls.from_discovery_document(
discovery,
issuer=issuer,
auth=auth,
session=session,
client_id=client_id,
client_secret=client_secret,
private_key=private_key,
authorization_server_jwks=jwks,
**kwargs,
)
@classmethod
def from_discovery_document(
cls,
discovery: dict[str, Any],
issuer: str | None = None,
auth: requests.auth.AuthBase | tuple[str, str] | str | None = None,
client_id: str | None = None,
client_secret: str | None = None,
private_key: Jwk | dict[str, Any] | None = None,
authorization_server_jwks: JwkSet | dict[str, Any] | None = None,
session: requests.Session | None = None,
https: bool = True,
**kwargs: Any,
) -> OAuth2Client:
"""Initialise an OAuth2Client, based on the server metadata from `discovery`.
Args:
discovery: a dict of server metadata, in the same format as retrieved from a discovery endpoint.
issuer: if an issuer is given, check that it matches the one mentioned in the document
auth: the authentication handler to use for client authentication
client_id: client ID
client_secret: client secret to use to authenticate the client
private_key: private key to sign client assertions
authorization_server_jwks: the current authorization server JWKS keys
session: a requests Session to use to retrieve the document and initialise the client with
https: if True, validates that urls in the discovery document use the https scheme
Returns:
an OAuth2Client
"""
if issuer and discovery.get("issuer") != issuer:
raise ValueError(
"Mismatching issuer value in discovery document: ",
issuer,
discovery.get("issuer"),
)
elif issuer is None:
issuer = discovery.get("issuer")
token_endpoint = discovery.get("token_endpoint")
if token_endpoint is None:
raise ValueError("token_endpoint not found in that discovery document")
validate_endpoint_uri(token_endpoint, https=https)
authorization_endpoint = discovery.get("authorization_endpoint")
if authorization_endpoint is not None:
validate_endpoint_uri(authorization_endpoint, https=https)
validate_endpoint_uri(token_endpoint, https=https)
revocation_endpoint = discovery.get("revocation_endpoint")
if revocation_endpoint is not None:
validate_endpoint_uri(revocation_endpoint, https=https)
introspection_endpoint = discovery.get("introspection_endpoint")
if introspection_endpoint is not None:
validate_endpoint_uri(introspection_endpoint, https=https)
userinfo_endpoint = discovery.get("userinfo_endpoint")
if userinfo_endpoint is not None:
validate_endpoint_uri(userinfo_endpoint, https=https)
jwks_uri = discovery.get("jwks_uri")
if jwks_uri is not None:
validate_endpoint_uri(userinfo_endpoint, https=https)
authorization_response_iss_parameter_supported = discovery.get(
"authorization_response_iss_parameter_supported", False
)
return cls(
token_endpoint=token_endpoint,
authorization_endpoint=authorization_endpoint,
revocation_endpoint=revocation_endpoint,
introspection_endpoint=introspection_endpoint,
userinfo_endpoint=userinfo_endpoint,
jwks_uri=jwks_uri,
authorization_server_jwks=authorization_server_jwks,
auth=auth,
client_id=client_id,
client_secret=client_secret,
private_key=private_key,
session=session,
issuer=issuer,
authorization_response_iss_parameter_supported=authorization_response_iss_parameter_supported,
**kwargs,
)
    def __enter__(self) -> OAuth2Client:
        """Allow using OAuth2Client as a context-manager.

        The Authorization Server public keys are retrieved on __enter__.
        """
        # refresh the cached AS public keys from the jwks_uri on entering the block
        self.update_authorization_server_public_keys()
        return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool: # noqa: D105
return True
def _require_endpoint(self, endpoint: str) -> str:
"""Check that a required endpoint url is set."""
url = getattr(self, endpoint, None)
if not url:
raise AttributeError(
f"No '{endpoint}' defined for this client. "
f"Please provide the URL for that endpoint when initializing your {self.__class__.__name__} instance."
)
return str(url) | /requests_oauth2client-1.3.0.tar.gz/requests_oauth2client-1.3.0/requests_oauth2client/client.py | 0.901791 | 0.240473 | client.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.