code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import io
import os
import stat
import subprocess
import time
import zlib
from Utils.Utilities import decodeBytesToUnicode
def calculateChecksums(filename):
    """
    _calculateChecksums_
    Compute the adler32 and cksum (POSIX CRC) checksums of a file.

    The adler32 checksum is computed incrementally with zlib while the very
    same 4096 byte chunks are fed to a `cksum` subprocess, since the cksum
    UNIX tool implements a CRC32 variant different from any of the python
    algorithms (see http://docs.python.org/library/zlib.html for the known
    signed vs. unsigned issues).

    :param filename: path of the file to checksum
    :return: tuple (adler32 as 8-char lowercase hex string, cksum CRC string)
    :raises RuntimeError: when the cksum output is inconsistent with the file size
    """
    adler = 1  # adler32 of an empty string
    cksumProcess = subprocess.Popen("cksum", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    with open(filename, 'rb') as fileObj:
        # step through the file in 4096 byte chunks
        while True:
            chunk = fileObj.read(4096)
            if chunk == b'':
                break
            adler = zlib.adler32(chunk, adler)
            cksumProcess.stdin.write(chunk)
    cksumProcess.stdin.close()
    cksumProcess.wait()
    cksumOutput = cksumProcess.stdout.read().split()
    cksumProcess.stdout.close()
    # consistency check: cksum prints "<crc> <byte count>"
    fileSize = os.stat(filename)[stat.ST_SIZE]
    if len(cksumOutput) != 2 or int(cksumOutput[1]) != fileSize:
        raise RuntimeError("Something went wrong with the cksum calculation !")
    crcValue = decodeBytesToUnicode(cksumOutput[0])
    return (format(adler & 0xffffffff, '08x'), crcValue)
def tail(filename, nLines=20):
    """
    _tail_
    Return the last *nLines* lines of *filename* as a single string.
    Adapted from code on http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail

    :param filename: path of the file to read
    :param nLines: non-negative number of trailing lines to return
    :return: string with the last nLines lines ("" when nLines is 0)
    """
    assert nLines >= 0
    # Fix: the old implementation evaluated lines[-0:] for nLines == 0,
    # which is the *whole* list instead of no lines at all.
    if nLines == 0:
        return ""
    pos, lines = nLines + 1, []
    # make sure only valid utf8 encoded chars will be passed along
    with io.open(filename, 'r', encoding='utf8', errors='ignore') as f:
        while len(lines) <= nLines:
            try:
                # seek backwards from the end, doubling the window each pass
                f.seek(-pos, 2)
            except IOError:
                # window grew past the beginning of the file: read it all
                f.seek(0)
                break
            finally:
                lines = list(f)
            pos *= 2
    return "".join(lines[-nLines:])
def getFileInfo(filename):
    """
    _getFileInfo_
    Return file metadata (name, size, last modification and last access
    times) in a human friendly dictionary.

    :param filename: path of the file to inspect
    :return: dict with keys Name, Size, LastModification, LastAccess
    """
    stats = os.stat(filename)
    timeFmt = "%m/%d/%Y %I:%M:%S %p"
    return {'Name': filename,
            'Size': stats[stat.ST_SIZE],
            'LastModification': time.strftime(timeFmt, time.localtime(stats[stat.ST_MTIME])),
            'LastAccess': time.strftime(timeFmt, time.localtime(stats[stat.ST_ATIME]))}
def findMagicStr(filename, matchString):
    """
    _findMagicStr_
    Generator yielding every line of *filename* that contains *matchString*.
    Bytes that are not valid utf8 are silently ignored while reading.

    :param filename: path of the log file to scan
    :param matchString: pattern string searched with a plain substring test
    """
    # TODO: can we avoid reading the whole file
    with io.open(filename, 'r', encoding='utf8', errors='ignore') as logFile:
        for logLine in logFile:
            if matchString in logLine:
                yield logLine
def getFullPath(name, envPath="PATH"):
    """
    Look for a file in each directory listed in an environment variable.

    :param name: file name
    :param envPath: any environment variable specified for path (PATH, PYTHONPATH, etc)
    :return: full path if it is found under the env variable, None otherwise
             (also when the environment variable itself is not set)
    """
    envValue = os.getenv(envPath)
    # Fix: os.getenv returns None for an unset variable and the old code
    # crashed with AttributeError on .split()
    if envValue is None:
        return None
    for path in envValue.split(os.path.pathsep):
        fullPath = os.path.join(path, name)
        if os.path.exists(fullPath):
            return fullPath
    return None
from builtins import str, bytes
def portForward(port):
    """
    Decorator wrapper function for port forwarding of the REST calls of any
    function to a given port.
    Constraints for applying this decorator:
    1. The decorated function must be defined within a class (not a static
       method), so its signature always carries the class instance first.
    2. The url must be the second positional argument, right after the
       class instance.
    3. The url must follow the syntax specifications in RFC 1808:
       https://tools.ietf.org/html/rfc1808.html
    When all constraints are met and the url starts with 'https://cmsweb',
    the given port is injected right after the '.cern.ch' host part.

    :param port: the port to which the REST call should be forwarded
    """
    def portForwardDecorator(callFunc):
        """
        The actual decorator
        """
        def portMangle(callObj, url, *args, **kwargs):
            """
            Forward the url (when eligible) before delegating to callFunc.

            :param callObj: the class instance (self) of the decorated method;
                            never used here, only kept to preserve the
                            positional argument order
            :param url: the actual url to be (eventually) forwarded
            :param args: positional arguments of the original function
            :param kwargs: keyword arguments of the original function
            """
            newUrl = url
            # best effort: on any failure fall back to the original url
            try:
                if isinstance(url, str) and url.startswith('https://cmsweb'):
                    newUrl = url.replace('.cern.ch/', '.cern.ch:%d/' % port, 1)
                elif isinstance(url, bytes) and url.startswith(b'https://cmsweb'):
                    newUrl = url.replace(b'.cern.ch/', b'.cern.ch:%d/' % port, 1)
            except Exception:
                newUrl = url
            return callFunc(callObj, newUrl, *args, **kwargs)
        return portMangle
    return portForwardDecorator
class PortForward():
    """
    A class with a call method exposing the functionality of the
    portForward decorator as a plain functional call:
    EXAMPLE:
        from Utils.PortForward import PortForward
        portForwarder = PortForward(8443)
        url = 'https://cmsweb-testbed.cern.ch/couchdb'
        url = portForwarder(url)
    """

    def __init__(self, port):
        """
        :param port: the port to forward eligible urls to
        """
        self.port = port

    def __call__(self, url):
        """
        Return *url* with the configured port injected, when eligible.
        """
        def _identity(_obj, aUrl):
            return aUrl
        mangler = portForward(self.port)(_identity)
        return mangler(self, url)
from builtins import object
from functools import reduce
class Functor(object):
    """
    A simple functor class used to construct a function call which is later
    applied to an (any type) object.

    The constructor takes the function plus any extra arguments; calling the
    instance (or its run method) with an object evaluates:
        func(obj, *args, **kwargs)

    NOTE: all additional arguments the function may take must be fixed in
    the constructor; they cannot be supplied at run time.

    :Example:
        >>> x = Functor(adder, 8, 'foo', bar=True)
        >>> x(2)        # evaluates adder(2, 8, 'foo', bar=True)
    """

    def __init__(self, func, *args, **kwargs):
        """
        Store the function together with the arguments to attach to it.

        :param func: callable taking the target object as first argument
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __call__(self, obj):
        """
        Calling the instance is equivalent to run(obj).
        """
        return self.run(obj)

    def run(self, obj):
        """
        Evaluate the stored function with *obj* prepended to the stored
        arguments and return the result.
        """
        return self.func(obj, *self.args, **self.kwargs)
class Pipeline(object):
    """
    A simple Functional Pipeline Class: applies a sequence of callables to
    an object, feeding the output of each step as the input of the next.
    """
    # NOTE:
    # Similar and inspiring approaches are discussed in:
    # https://softwarejourneyman.com/python-function-pipelines.html
    # https://gitlab.com/mc706/functional-pipeline

    def __init__(self, funcLine=None, name=None):
        """
        :param funcLine: list of callables (plain functions or Functors)
            to be applied sequentially to the object. A Functor in the
            funcLine, e.g. Functor(func, 'foo', bar=True), results in the
            call func(obj, 'foo', bar=True) when the pipeline is executed.
        :param name: optional name for the pipeline

        :Example:
            >>> pipe = Pipeline([Functor(adder, 5),
            ...                  Functor(adder, 6)])
            >>> pipe.run(1)
            12
        """
        self.funcLine = funcLine or []
        self.name = name

    def getPipelineName(self):
        """
        Return the pipeline name, or a default label when unnamed.
        """
        return self.name or "Unnamed Pipeline"

    def run(self, obj):
        """
        Execute every step of the pipeline in order and return the final
        result (equivalent to folding the funcLine over obj).
        """
        current = obj
        for step in self.funcLine:
            current = step(current)
        return current
# system modules
import os
import ssl
import time
import logging
import traceback
# third part library
try:
import jwt
except ImportError:
traceback.print_exc()
jwt = None
from Utils.Utilities import encodeUnicodeToBytes
# prevent "SSL: CERTIFICATE_VERIFY_FAILED" error
# this will cause pylint warning W0212, therefore we ignore it above
ssl._create_default_https_context = ssl._create_unverified_context
def readToken(name=None):
    """
    Read an IAM token either from the environment or from a file.

    :param name: either a file name containing the token or the name of an
        environment variable holding the token value. When not provided,
        the token is read from the IAM_TOKEN environment variable.
    :return: the token string, or None when nothing is found
    """
    if name:
        if os.path.exists(name):
            # name points to an existing file: read the token from it
            with open(name, 'r', encoding='utf-8') as istream:
                return istream.read()
        # otherwise treat name as an environment variable
        return os.environ.get(name)
    return os.environ.get("IAM_TOKEN")
def tokenData(token, url="https://cms-auth.web.cern.ch/jwk", audUrl="https://wlcg.cern.ch/jwt/v1/any"):
    """
    Inspect a JWT token and extract its payload data.

    :param token: token string (str or bytes)
    :param url: IAM provider URL serving the JWK signing keys
    :param audUrl: audience string
    :return: dict with the decoded token payload; empty dict when no token
             is given or the jwt library is not available
    """
    if not token or not jwt:
        return {}
    if isinstance(token, str):
        token = encodeUnicodeToBytes(token)
    signingKey = jwt.PyJWKClient(url).get_signing_key_from_jwt(token)
    headers = jwt.get_unverified_header(token)
    return jwt.decode(
        token,
        signingKey.key,
        algorithms=[headers.get('alg', 'RS256')],
        audience=audUrl,
        options={"verify_exp": True},
    )
def isValidToken(token):
    """
    Check whether the given token is (still) valid.

    :param token: token string
    :return: True when the token payload carries a non-zero 'exp' claim
             that has not passed yet, False otherwise
    """
    expTime = tokenData(token).get('exp', 0)  # expiration, seconds since epoch
    if expTime and expTime >= time.time():
        return True
    return False
class TokenManager():
    """
    TokenManager handles IAM tokens: it reads them either from a file or
    from an environment variable and caches the token together with its
    expiration timestamp.
    """

    def __init__(self,
                 name=None,
                 url="https://cms-auth.web.cern.ch/jwk",
                 audUrl="https://wlcg.cern.ch/jwt/v1/any",
                 logger=None):
        """
        :param name: string representing either the file or the env variable
                     to read the token from (defaults to the IAM_TOKEN env)
        :param url: IAM provider URL
        :param audUrl: audience string
        :param logger: logger object, or None to use the default one
        """
        self.name = name
        self.url = url
        self.audUrl = audUrl
        self.expire = 0
        self.token = None
        self.logger = logger or logging.getLogger()
        try:
            self.token = self.getToken()
        except Exception as exc:
            self.logger.exception("Failed to get token. Details: %s", str(exc))

    def getToken(self):
        """
        Return a valid token and update its expiration timestamp.
        The token is re-read (from file or environment) whenever the cached
        one is missing or no longer valid; the payload is then inspected to
        refresh self.expire.
        """
        if not self.token or not isValidToken(self.token):
            self.token = readToken(self.name)
        payload = {}
        try:
            payload = tokenData(self.token, url=self.url, audUrl=self.audUrl)
            self.logger.debug(payload)
        except Exception as exc:
            self.logger.exception(str(exc))
            raise
        self.expire = payload.get('exp', 0)
        return self.token

    def getLifetime(self):
        """
        Return the remaining lifetime (in seconds) of the cached token.
        """
        return self.expire - int(time.time())
from copy import copy
from builtins import object
from time import time
class MemoryCacheException(Exception):
    """Exception raised for MemoryCache errors (expired cache, type mismatch)."""

    def __init__(self, message):
        super().__init__(message)
class MemoryCache():
    """
    A simple expiring in-memory cache holding a single data object
    (dict, set, list, or any other type provided as initial data).
    The data type chosen at construction time is enforced on updates.
    """
    __slots__ = ["lastUpdate", "expiration", "_cache"]

    def __init__(self, expiration, initialData=None):
        """
        Initializes cache object
        :param expiration: expiration time in seconds
        :param initialData: initial value for the cache (also fixes the
            data type accepted by setCache from now on)
        """
        self.lastUpdate = int(time())
        self.expiration = expiration
        self._cache = initialData

    def __contains__(self, item):
        """
        Check whether item is in the current cache
        :param item: a simple object (string, integer, etc)
        :return: True if the object can be found in the cache, False otherwise
        """
        return item in self._cache

    def __getitem__(self, keyName):
        """
        If the cache is a dictionary, return that item from the cache. Else, raise an exception.
        :param keyName: the key name from the dictionary
        :return: a (shallow) copy of the cached value, so callers cannot
            mutate the cache content; None when the key is missing
        """
        if isinstance(self._cache, dict):
            return copy(self._cache.get(keyName))
        else:
            raise MemoryCacheException("Cannot retrieve an item from a non-dict MemoryCache object: {}".format(self._cache))

    def reset(self):
        """
        Resets the cache to an empty value of its current data type
        """
        if isinstance(self._cache, (dict, set)):
            self._cache.clear()
        elif isinstance(self._cache, list):
            del self._cache[:]
        else:
            raise MemoryCacheException("The cache needs to be reset manually, data type unknown")

    def isCacheExpired(self):
        """
        Evaluate whether the cache has already expired, returning
        True if it did, otherwise it returns False
        """
        return self.lastUpdate + self.expiration < int(time())

    def getCache(self):
        """
        Raises an exception if the cache has expired, otherwise returns
        its data
        """
        if self.isCacheExpired():
            expiredSince = int(time()) - (self.lastUpdate + self.expiration)
            raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
        return self._cache

    def setCache(self, inputData):
        """
        Refresh the cache with the content provided (refresh its expiration as well)
        This method enforces the user to not change the cache data type
        :param inputData: data to store in the cache
        """
        if not isinstance(self._cache, type(inputData)):
            raise TypeError("Current cache data type: %s, while new value is: %s" %
                            (type(self._cache), type(inputData)))
        self.reset()
        self.lastUpdate = int(time())
        self._cache = inputData

    def addItemToCache(self, inputItem):
        """
        Adds new item(s) to the cache, without resetting its expiration.
        It, of course, only works for data caches of type: list, set or dict.
        :param inputItem: additional item to be added to the current cached data
        """
        if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
            # extend another list or set into a set
            self._cache.update(inputItem)
        elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a set
            self._cache.add(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
            # extend another list or set into a list
            self._cache.extend(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a list
            self._cache.append(inputItem)
        elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
            self._cache.update(inputItem)
        else:
            # Fix: the format arguments were swapped relative to the message
            # text (item type was reported as the cache type and vice versa)
            msg = "Input item type: %s cannot be added to a cache type: %s" % (type(inputItem), type(self._cache))
            raise TypeError("Cache and input item data type mismatch. %s" % msg)
from builtins import object
import logging
import time
import calendar
from datetime import tzinfo, timedelta
def gmtimeSeconds():
    """
    Return the current GMT/UTC time as integer seconds since the epoch.

    NOTE: the previous implementation used time.mktime(time.gmtime()),
    but time.mktime interprets its struct_time argument as *local* time,
    so on non-UTC hosts the result was shifted by the local UTC offset.
    calendar.timegm is the proper inverse of time.gmtime.
    """
    return calendar.timegm(time.gmtime())
def encodeTimestamp(secs):
    """
    Encode seconds since epoch into a string GMT timezone representation
    of the form "%Y-%m-%dT%H:%M:%SZ".

    :param secs: input timestamp value (either int or float) in seconds since epoch
    :return: time string in GMT timezone representation
    :raises Exception: when secs is not an int or float
    """
    if isinstance(secs, (int, float)):
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(int(secs)))
    raise Exception("Wrong input, should be seconds since epoch either int or float value")
def decodeTimestamp(timeString):
    """
    Decode a GMT timestamp string (as produced by encodeTimestamp) back
    into seconds since epoch.

    :param timeString: timestamp string representation in GMT timezone
    :return: seconds since epoch in GMT timezone
    :raises Exception: when timeString is not a string
    """
    if isinstance(timeString, str):
        return calendar.timegm(time.strptime(timeString, "%Y-%m-%dT%H:%M:%SZ"))
    raise Exception("Wrong input, should be time string in GMT timezone representation")
def timeFunction(func):
    """
    source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
    Decorator function to measure how long a method/function takes to run.
    It returns a tuple with:
      * wall clock time spent (rounded to 4 decimals)
      * returned result of the function
      * the function name
    Improvement: functools.wraps is applied so the decorated function keeps
    its original __name__/__doc__ metadata.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*arg, **kw):
        t1 = time.time()
        res = func(*arg, **kw)
        t2 = time.time()
        return round((t2 - t1), 4), res, func.__name__
    return wrapper
class CodeTimer(object):
    """
    A context manager for timing a code section and logging the duration.
    Adapted from https://www.blog.pythonlibrary.org/2016/05/24/python-101-an-intro-to-benchmarking-your-code/
    Use like:
        with CodeTimer(label='Doing something'):
            do_something()
    """

    def __init__(self, label='The function', logger=None):
        """
        :param label: text used in the log message for this timed section
        :param logger: logger object, or None for the default logger
        """
        self.start = time.time()
        self.label = label
        self.logger = logger or logging.getLogger()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # log the elapsed wall-clock time, rounded to milliseconds
        runtime = round(time.time() - self.start, 3)
        self.logger.info(f"{self.label} took {runtime} seconds to complete")
class LocalTimezone(tzinfo):
    """
    A tzinfo implementation describing the host's local timezone, used for
    formatting rfc3339 timestamps when sending alerts to the MONIT
    AlertManager. Mirrors the reference implementation from the Python 2
    datetime docs and can be removed once WMCore fully relies on python3:
    https://docs.python.org/2/library/datetime.html#tzinfo-objects
    """

    def __init__(self):
        super(LocalTimezone, self).__init__()
        self.ZERO = timedelta(0)
        # standard (non-DST) UTC offset of the host
        self.STDOFFSET = timedelta(seconds=-time.timezone)
        # DST offset, falling back to the standard one when DST is not defined
        self.DSTOFFSET = timedelta(seconds=-time.altzone) if time.daylight else self.STDOFFSET
        self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET

    def utcoffset(self, dt):
        return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET

    def dst(self, dt):
        return self.DSTDIFF if self._isdst(dt) else self.ZERO

    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # round-trip through mktime/localtime to let the C library decide
        # whether DST applies to the given wall-clock moment
        timeTuple = (dt.year, dt.month, dt.day,
                     dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, 0)
        localTuple = time.localtime(time.mktime(timeTuple))
        return localTuple.tm_isdst > 0
import copy
import unittest
class ExtendedUnitTestCase(unittest.TestCase):
    """
    TestCase subclass providing an order-insensitive deep comparison
    assertion for nested containers.
    """

    def assertContentsEqual(self, expected_obj, actual_obj, msg=None):
        """
        A nested object comparison without regard for the ordering of contents.
        Asserts that expected_obj and actual_obj contain the same elements and
        that their sub-elements are the same, while any (possibly nested)
        sequences may hold the same elements in different orders.
        Both inputs are deep-copied, so the originals are never modified.
        """
        def _sortKey(item):
            # dicts are unorderable in py3; compare them by their key lists
            return list(item.keys()) if isinstance(item, dict) else item

        def _normalize(node):
            # recursively sort every list found inside the structure, in place
            if isinstance(node, dict):
                for value in list(node.values()):
                    _normalize(value)
            elif isinstance(node, list):
                for value in node:
                    _normalize(value)
                node.sort(key=_sortKey)

        if not isinstance(expected_obj, type(actual_obj)):
            self.fail(msg="The two objects are different type and cannot be compared: %s and %s" % (
                type(expected_obj), type(actual_obj)))

        expected = copy.deepcopy(expected_obj)
        actual = copy.deepcopy(actual_obj)
        if isinstance(expected, (dict, list)):
            _normalize(expected)
            _normalize(actual)
        else:
            self.fail(msg="The two objects are different type (%s) and cannot be compared." % type(expected_obj))
        return self.assertEqual(expected, actual)
from builtins import str, bytes
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
    """
    Return a copy of *headers* with CMS-specific header names lower-cased.
    The WMCore Authentication code checks only lower-case cms headers,
    e.g. cms-xxx-yyy; all other keys are kept untouched.

    :param headers: dict of HTTP headers
    :return: new dict with Cms-/CMS- prefixed keys lower-cased
    """
    lowered = {}
    for headerKey, headerVal in headers.items():
        newKey = headerKey.lower() if headerKey.startswith(('Cms-', 'CMS-')) else headerKey
        lowered[newKey] = headerVal
    return lowered
def makeList(stringList):
    """
    _makeList_
    Turn a comma separated string into a python list of strings.
    A list input is returned untouched; any other type raises ValueError.

    :param stringList: list, or string such as "a, b" or "['a', 'b']"
    :return: list of stripped strings (empty list for an empty string)
    """
    if isinstance(stringList, list):
        return stringList
    if not isinstance(stringList, str):
        raise ValueError("Can't convert to list %s" % stringList)
    tokens = stringList.lstrip(' [').rstrip(' ]').split(',')
    if tokens == ['']:
        return []
    return [str(tok.strip(' \'"')) for tok in tokens]
def makeNonEmptyList(stringList):
    """
    _makeNonEmptyList_
    Given a string or a list of strings, return a non empty list of strings.

    :param stringList: a string or a python list
    :return: non-empty list of strings
    :raises ValueError: when the resulting list is empty or the input is
        neither a string nor a list (propagated from makeList)
    """
    resultList = makeList(stringList)
    if resultList:
        return resultList
    raise ValueError("Input data cannot be an empty list %s" % stringList)
def strToBool(string):
    """
    Map boolean-ish values (including string spellings) to a real boolean.
    In short:
      * True gets mapped from: True, "True", "true", "TRUE"
      * False gets mapped from: False, "False", "false", "FALSE"
      * anything else will fail

    :param string: expects a boolean or a string, but it could be anything else
    :return: a boolean value
    :raises ValueError: for any unsupported input
    """
    if string is True or string is False:
        return string
    if string in ("True", "true", "TRUE"):
        return True
    if string in ("False", "false", "FALSE"):
        return False
    raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
    """
    _safeStr_
    Cast simple data (int, float, string, None, ...) to string; containers
    (tuple, list, set, dict) are rejected.

    :raises ValueError: when the input is a container type
    """
    if isinstance(string, (tuple, list, set, dict)):
        raise ValueError("We're not supposed to convert %s to string." % string)
    return str(string)
def diskUse():
    """
    Return the percentage use of each disk partition, as reported by
    `df -klP`.

    :return: list of {'mounted': mount point, 'percent': usage} dicts
    """
    dfProc = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
    dfOutput = decodeBytesToUnicode(dfProc.communicate()[0])
    partitions = []
    for line in dfOutput.split("\n"):
        fields = line.split()
        # skip blank lines and the "Filesystem ..." header row
        if fields and fields[0] != 'Filesystem':
            partitions.append({'mounted': fields[5], 'percent': fields[4]})
    return partitions
def numberCouchProcess():
    """
    Return the number of 'couchjs' occurrences in the `ps -ef` output,
    i.e. the number of couch processes.
    """
    psProc = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
    psOutput = decodeBytesToUnicode(psProc.communicate()[0])
    return psOutput.count('couchjs')
def rootUrlJoin(base, extend):
    """
    Append a path element to the path part of a ROOT url
    (root://host/path).

    :param base: base ROOT url, e.g. "root://host/some/path"
    :param extend: path element to append
    :return: the extended url, or None when base is empty or not a valid
        ROOT url
    """
    if not base:
        return None
    match = re.match("^root://([^/]+)/(.+)", base)
    if not match:
        return None
    host, path = match.group(1), match.group(2)
    return "root://%s/%s" % (host, os.path.join(path, extend))
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
    """
    _zipEncodeStr_
    Utility to zip a string and encode it.
    If zipped encoded length is greater than maxLen,
    truncate message until zip/encoded version
    is within the limits allowed.

    :param message: input message, unicode or bytes (converted to bytes internally)
    :param maxLen: maximum length allowed for the returned encoded string;
                   -1 disables the length check
    :param compressLevel: zlib compression level (9 = best compression)
    :param steps: number of bytes stripped from the message per iteration
                  when the first size estimate is still too long
    :param truncateIndicator: marker appended to a truncated message
    :return: base64-encoded, zlib-compressed bytes of the (possibly truncated) message
    """
    message = encodeUnicodeToBytes(message)
    encodedStr = zlib.compress(message, compressLevel)
    encodedStr = base64.b64encode(encodedStr)
    if len(encodedStr) < maxLen or maxLen == -1:
        return encodedStr
    # ratio of the compressed/encoded size over the plain encoded size for
    # this particular message, used to guess how much of it can be kept
    compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
    # Estimate new length for message zip/encoded version
    # to be less than maxLen.
    # Also, append truncate indicator to message.
    truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
    strLen = int((maxLen - len(truncateIndicator)) / compressRate)
    message = message[:strLen] + truncateIndicator
    encodedStr = zipEncodeStr(message, maxLen=-1)
    # If new length is not short enough, truncate
    # recursively by steps
    while len(encodedStr) > maxLen:
        message = message[:-steps - len(truncateIndicator)] + truncateIndicator
        encodedStr = zipEncodeStr(message, maxLen=-1)
    return encodedStr
def getSize(obj):
    """
    _getSize_
    Traverse an object, following references into contained objects, and
    return its total size in bytes (unlike the shallow sys.getsizeof).
    Code extracted from Stack Overflow:
    https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python

    :param obj: a python object
    :return: an integer representing the total size of the object
    :raises TypeError: when obj is a type, a module or a function
        (functions/modules pull in far too many references)
    """
    blacklist = (type, ModuleType, FunctionType)
    if isinstance(obj, blacklist):
        raise TypeError('getSize() does not take argument of type: ' + str(type(obj)))
    visited = set()
    total = 0
    frontier = [obj]
    # breadth-first walk over the reference graph via gc.get_referents
    while frontier:
        nextLevel = []
        for item in frontier:
            if isinstance(item, blacklist) or id(item) in visited:
                continue
            visited.add(id(item))
            total += sys.getsizeof(item)
            nextLevel.append(item)
        frontier = get_referents(*nextLevel)
    return total
def decodeBytesToUnicode(value, errors="strict"):
    """
    Decode *value* from utf-8 bytes to a unicode string when it is a bytes
    object; any other type is returned unchanged.

    This is useful for the "unicode sandwich" approach: convert bytes to
    unicode as soon as possible when receiving input data, and back to
    bytes as late as possible (the latter is NOT covered by this function,
    and is not always necessary - when in doubt, do not do it).
    Reference: https://nedbatchelder.com/text/unipain.html

    :param value: object of any type
    :param errors: decoding error scheme; "strict", "ignore", "replace" or
        "backslashreplace", see
        https://docs.python.org/3/howto/unicode.html#the-string-type
    :return: decoded string, or the original value when it was not bytes
    """
    return value.decode("utf-8", errors) if isinstance(value, bytes) else value
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
    """
    Conditionally apply decodeBytesToUnicode, maintaining brevity at the
    call site: when *condition* is truthy, return
    decodeBytesToUnicode(value, errors), otherwise return value unchanged.

    Parameters
    ----------
    value : any
        passed to decodeBytesToUnicode
    errors: str
        passed to decodeBytesToUnicode
    condition: boolean or object with attribute __bool__()
        if True, then we run decodeBytesToUnicode. Usually PY2/PY3
    """
    return decodeBytesToUnicode(value, errors) if condition else value
def encodeUnicodeToBytes(value, errors="strict"):
    """
    Encode *value* from a unicode string to utf-8 bytes when it is a str
    object; any other type is returned unchanged.

    This is useful for the "unicode sandwich" approach: convert unicode to
    bytes as late as possible, when passing a string to a third-party
    function that only accepts bytes as input (pycurl's curl.setopt is an
    example).

    :param value: object of any type
    :param errors: encoding error scheme; "strict", "ignore", "replace",
        "backslashreplace", "xmlcharrefreplace" or "namereplace", see
        https://docs.python.org/3/howto/unicode.html#the-string-type
    :return: encoded bytes, or the original value when it was not a str
    """
    return value.encode("utf-8", errors) if isinstance(value, str) else value
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
    """
    Conditionally apply encodeUnicodeToBytes, maintaining brevity at the
    call site: when *condition* is truthy, return
    encodeUnicodeToBytes(value, errors), otherwise return value unchanged.

    Parameters
    ----------
    value : any
        passed to encodeUnicodeToBytes
    errors: str
        passed to encodeUnicodeToBytes
    condition: boolean or object with attribute __bool__()
        if True, then we run encodeUnicodeToBytes. Usually PY2/PY3
    """
    return encodeUnicodeToBytes(value, errors) if condition else value
import json
import urllib
from urllib.parse import urlparse, parse_qs, quote_plus
from collections import defaultdict
from Utils.CertTools import cert, ckey
from dbs.apis.dbsClient import aggFileLumis, aggFileParents
from WMCore.Services.pycurl_manager import getdata as multi_getdata
from Utils.PortForward import PortForward
def dbsListFileParents(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS listFileParents API.

    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict of aggregated file parents, keyed by block name
    """
    urls = ['%s/fileparents?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, aggFileParents, 'block_name')
def dbsListFileLumis(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS listFileLumis API.

    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict of aggregated file lumis, keyed by block name
    """
    urls = ['%s/filelumis?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, aggFileLumis, 'block_name')
def dbsBlockOrigin(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS blockorigin API.

    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict of block origin payloads, keyed by block name
    """
    urls = ['%s/blockorigin?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, None, 'block_name')
def dbsParentFilesGivenParentDataset(dbsUrl, parentDataset, fInfo):
    """
    Obtain parent files for given fileInfo object
    :param dbsUrl: DBS URL
    :param parentDataset: parent dataset name
    :param fInfo: list of file info dicts, each carrying the keys
        'run_num', 'lumi_section_num' and 'logical_file_name'
    :return: dict mapping child logical file name -> set of parent LFNs

    Fix: the per-file DBS URL used to be rebuilt in a second, duplicated loop
    (quoting and port-forwarding logic copied twice); it is now built exactly
    once per record and reused for the post-fetch lookup.
    """
    portForwarder = PortForward(8443)
    # pair every file info record with its (port-forwarded) DBS "files" URL
    # so the lookup after the concurrent fetch reuses the very same string
    urlPairs = []
    for fileInfo in fInfo:
        run = fileInfo['run_num']
        lumis = quote_plus(str(fileInfo['lumi_section_num']))
        url = f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}'
        urlPairs.append((fileInfo, portForwarder(url)))

    # fetch all URLs concurrently; results keyed by full URL (uKey=None)
    rdict = getUrls([url for _, url in urlPairs], None, None)

    parentFiles = defaultdict(set)
    for fileInfo, url in urlPairs:
        if url in rdict:
            pFiles = {x['logical_file_name'] for x in rdict[url]}
            parentFiles[fileInfo['logical_file_name']] |= pFiles
    return parentFiles
def getUrls(urls, aggFunc, uKey=None):
    """
    Perform parallel DBS calls for given set of urls and apply given aggregation
    function to the results.
    :param urls: list of DBS urls to call
    :param aggFunc: aggregation function applied to each decoded result;
        pass None to store the raw decoded JSON
    :param uKey: url parameter to use as key of the final dictionary;
        pass None to key the results by the full url
    :return: dictionary of results where keys are urls (or the uKey parameter
        value) and values are the obtained results
    :raises RuntimeError: if any call comes back with a non-200 HTTP code
    """
    # renamed from `data`: the old inner `data = row.get('data', ...)` shadowed
    # the sequence being iterated
    responses = multi_getdata(urls, ckey(), cert())
    rdict = {}
    for row in responses:
        url = row['url']
        code = int(row.get('code', 200))
        error = row.get('error')
        if code != 200:
            msg = f"Fail to query {url}. Error: {code} {error}"
            raise RuntimeError(msg)
        if uKey:
            key = urlParams(url).get(uKey)
        else:
            key = url
        # default to an empty JSON list so a missing payload decodes cleanly
        # (the old default of [] made json.loads raise TypeError)
        payload = row.get('data', '[]')
        res = json.loads(payload)
        rdict[key] = aggFunc(res) if aggFunc else res
    return rdict
def urlParams(url):
    """
    Return dictionary of URL parameters
    :param url: URL link
    :return: dictionary of URL parameters; single-valued parameters are
        flattened to their scalar value, multi-valued ones stay as lists
    """
    query = parse_qs(urlparse(url).query)
    return {name: values[0] if len(values) == 1 else values
            for name, values in query.items()}
from __future__ import (division, print_function)
from builtins import str, bytes
from Utils.Utilities import encodeUnicodeToBytes
from io import BytesIO
import re
import xml.etree.cElementTree as ET
# precompiled patterns classifying numeric-looking strings
int_number_pattern = re.compile(r'(^[0-9-]$|^[0-9-][0-9]*$)')
float_number_pattern = re.compile(r'(^[-]?\d+\.\d*$|^\d*\.{1,1}\d+$)')


def adjust_value(value):
    """
    Normalize a string scalar coming out of the XML parser: map the literal
    'null'/'(null)' markers to None and convert numeric-looking strings to
    float/int; everything else (including non-strings) is returned untouched.
    """
    if not isinstance(value, str):
        return value
    if value in ('null', '(null)'):
        return None
    if float_number_pattern.match(value):
        return float(value)
    if int_number_pattern.match(value):
        return int(value)
    return value
def xml_parser(data, prim_key):
    """
    Generic XML parser: yield one dict per element whose tag matches
    *prim_key*, with the element's (recursively collected) content.
    :param data: can be of type "file object", unicode string or bytes string
    :param prim_key: tag name of the elements to extract
    """
    if isinstance(data, (str, bytes)):
        # normalize strings into a bytes stream for iterparse
        stream = BytesIO(encodeUnicodeToBytes(data, "ignore"))
    else:
        stream = data
    for event, elem in ET.iterparse(stream):
        if elem.tag != prim_key:
            continue
        row = {prim_key: elem.attrib}
        get_children(elem, event, row, prim_key)
        elem.clear()
        yield row
def get_children(elem, event, row, key):
    """
    xml_parser helper function. It gets recursively information about
    children for given element tag. Information is stored into provided
    row for given key. The change of notations can be applied during
    parsing step by using provided notations dictionary.

    Fix: Element.getchildren() was deprecated since Python 3.2 and removed
    in Python 3.9; iterate the element directly instead (materialized into a
    list first, since every child is cleared at the end of the loop body).
    """
    for child in list(elem):
        child_key = child.tag
        child_data = child.attrib
        if not child_data:
            # attribute-less children carry their payload as text
            child_dict = adjust_value(child.text)
        else:
            child_dict = child_data

        if len(child):  # we got grand-children (was child.getchildren())
            if child_dict:
                row[key][child_key] = child_dict
            else:
                row[key][child_key] = {}
            if isinstance(child_dict, dict):
                newdict = {child_key: child_dict}
            else:
                newdict = {child_key: {}}
            # recurse into the grand-children, then adopt the result
            get_children(child, event, newdict, child_key)
            row[key][child_key] = newdict[child_key]
        else:
            if not isinstance(row[key], dict):
                row[key] = {}
            # leaf children accumulate into a list per tag name
            row[key].setdefault(child_key, [])
            row[key][child_key].append(child_dict)
        child.clear()
from __future__ import division
from builtins import object
from datetime import timedelta, datetime
import socket
import json
import logging
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Timers import LocalTimezone
class AlertManagerAPI(object):
    """
    A class used to send alerts via the MONIT AlertManager API
    """

    def __init__(self, alertManagerUrl, logger=None):
        """
        :param alertManagerUrl: URL of the AlertManager endpoint alerts are POSTed to
        :param logger: logger to use; the root logger is used when not provided
        """
        self.alertManagerUrl = alertManagerUrl
        # sender's hostname is added as an annotation
        self.hostname = socket.gethostname()
        self.mgr = RequestHandler()
        self.ltz = LocalTimezone()
        self.headers = {"Content-Type": "application/json"}
        self.validSeverity = ["high", "medium", "low"]
        self.logger = logger if logger else logging.getLogger()

    def sendAlert(self, alertName, severity, summary, description, service, tag="wmcore", endSecs=600, generatorURL=""):
        """
        :param alertName: a unique name for the alert
        :param severity: low, medium, high
        :param summary: a short description of the alert
        :param description: a longer informational message with details about the alert
        :param service: the name of the service firing an alert
        :param tag: a unique tag used to help route the alert
        :param endSecs: how many seconds until the alarm is silenced
            (fix: the docstring used to say "minutes", but the value is added
            to the current time as seconds)
        :param generatorURL: this URL will be sent to AlertManager and configured as a clickable "Source" link in the web interface
        :return: False when the severity is invalid, otherwise the result of
            the HTTP POST against AlertManager

        AlertManager JSON format reference: https://www.prometheus.io/docs/alerting/latest/clients/
        [
          {
            "labels": {
               "alertname": "<requiredAlertName>",
               "<labelname>": "<labelvalue>",
               ...
            },
            "annotations": {
               "<labelname>": "<labelvalue>",
               ...
            },
            "startsAt": "<rfc3339>",  # optional, will be current time if not present
            "endsAt": "<rfc3339>",
            "generatorURL": "<generator_url>"  # optional
          },
        ]
        """
        if not self._isValidSeverity(severity):
            return False

        # labels drive routing inside AlertManager
        labels = {"alertname": alertName,
                  "severity": severity,
                  "tag": tag,
                  "service": service}
        # annotations are free-form informational payload
        annotations = {"hostname": self.hostname,
                       "summary": summary,
                       "description": description}

        # In python3 we won't need the LocalTimezone class
        # Will change to d = datetime.now().astimezone() + timedelta(seconds=endSecs)
        d = datetime.now(self.ltz) + timedelta(seconds=endSecs)

        alert = {"labels": labels,
                 "annotations": annotations,
                 "endsAt": d.isoformat("T"),
                 "generatorURL": generatorURL}
        request = [alert]
        # need to do this because pycurl_manager only accepts dict and encoded strings type
        params = json.dumps(request)

        res = self.mgr.getdata(self.alertManagerUrl, params=params, headers=self.headers, verb='POST')
        return res

    def _isValidSeverity(self, severity):
        """
        Used to check if the severity of the alert matches the valid levels: low, medium, high
        :param severity: severity of the alert
        :return: True or False
        """
        if severity not in self.validSeverity:
            # fix: log through the instance logger so a logger injected via
            # the constructor is honoured (used to call the root logging module)
            self.logger.critical("Alert submitted to AlertManagerAPI with invalid severity: %s", severity)
            return False
        return True
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class DBCreator(DBFormatter):
    """
    _DBCreator_

    Generic class for creating database tables.
    """

    def __init__(self, logger, dbinterface):
        """
        _init_

        Call the constructor of the parent class and create empty dictionaries
        to hold table create statements, index statements, constraint
        statements and insert statements.
        """
        DBFormatter.__init__(self, logger, dbinterface)
        self.create = {}
        self.constraints = {}
        self.inserts = {}
        self.indexes = {}

    def _processStatements(self, statements, conn=None, transaction=False, ordered=False):
        """
        Execute every SQL statement held in the *statements* dictionary,
        wrapping any failure into a WMException ('WMCORE-2') carrying the
        failing statement. When *ordered* is True the statements run in
        sorted-key order (table creation may depend on that ordering).
        """
        keys = sorted(statements.keys()) if ordered else statements
        for i in keys:
            try:
                self.dbi.processData(statements[i],
                                     conn=conn,
                                     transaction=transaction)
            except Exception as e:
                msg = WMEXCEPTION['WMCORE-2'] + '\n\n' + \
                      str(statements[i]) + '\n\n' + str(e)
                self.logger.debug(msg)
                raise WMException(msg, 'WMCORE-2')

    def execute(self, conn=None, transaction=False):
        """
        _execute_

        Create tables, indexes and constraints, then insert the permanent
        data, by executing the SQL statements held in the corresponding
        dictionaries (previously four copy-pasted try/except loops; now a
        single shared helper). Table creation runs in sorted-key order to
        offer the possibility of executing it in a certain sequence.
        """
        self._processStatements(self.create, conn, transaction, ordered=True)
        self._processStatements(self.indexes, conn, transaction)
        self._processStatements(self.constraints, conn, transaction)
        self._processStatements(self.inserts, conn, transaction)
        return True

    def __str__(self):
        """
        _str_

        Return a well formatted text representation of the schema held in the
        self.create, self.constraints, self.inserts, self.indexes dictionaries.
        """
        text = ''
        for statements in (self.create, self.constraints, self.inserts, self.indexes):
            for name in statements:
                text += statements[name].lstrip() + '\n'
        return text
from __future__ import division, print_function
from builtins import str, object
try:
import mongomock
except ImportError:
# this library should only be required by unit tests
mongomock = None
from pymongo import MongoClient, errors, IndexModel
from pymongo.errors import ConnectionFailure
class MongoDB(object):
    """
    A simple wrapper class for creating a connection to a MongoDB instance
    """

    def __init__(self, database=None, server=None,
                 create=False, collections=None, testIndexes=False,
                 logger=None, mockMongoDB=False, **kwargs):
        """
        :database: A database Name to connect to
        :server: The server url or a list of (server:port) pairs (see https://docs.mongodb.com/manual/reference/connection-string/)
        :create: A flag to trigger a database creation (if missing) during
                 object construction, together with collections if present.
        :collections: A list of tuples describing collections with indexes -
                      the first element is considered the collection name, all
                      the rest elements are considered as indexes
        :testIndexes: A flag to trigger index test and eventually to create them
                      if missing (TODO)
        :mockMongoDB: A flag to trigger a database simulation instead of trying
                      to connect to a real database server.
        :logger: Logger

        Here follows a short list of useful optional parameters accepted by the
        MongoClient which may be passed as keyword arguments to the current module:
        :replicaSet: The name of the replica set to connect to. The driver will verify
                     that all servers it connects to match this name. Implies that the
                     hosts specified are a seed list and the driver should attempt to
                     find all members of the set. Defaults to None.
        :port: The port number on which to connect. It is overwritten by the ports
               defined in the Url string or from the tuples listed in the server list
        :connect: If True, immediately begin connecting to MongoDB in the background.
                  Otherwise connect on the first operation.
        :directConnection: If True, forces the client to connect directly to the specified MongoDB
                           host as a standalone. If False, the client connects to the entire
                           replica set of which the given MongoDB host(s) is a part.
                           If this is True and a mongodb+srv:// URI or a URI containing multiple
                           seeds is provided, an exception will be raised.
        :username: A string
        :password: A string
                   Although username and password must be percent-escaped in a MongoDB URI,
                   they must not be percent-escaped when passed as parameters. In this example,
                   both the space and slash special characters are passed as-is:
                   MongoClient(username="user name", password="pass/word")
        """
        self.server = server
        self.logger = logger
        self.mockMongoDB = mockMongoDB
        if mockMongoDB and mongomock is None:
            msg = "You are trying to mock MongoDB, but you do not have mongomock in the python path."
            self.logger.critical(msg)
            raise ImportError(msg)

        # NOTE: We need to explicitly check for server availability: since
        # pymongo 3.0 the MongoClient constructor no longer blocks while
        # connecting and no longer raises ConnectionFailure for unreachable
        # servers - it connects lazily on background threads instead.
        try:
            if mockMongoDB:
                self.client = mongomock.MongoClient()
                self.logger.info("NOTICE: MongoDB is set to use mongomock, instead of real database.")
            else:
                self.client = MongoClient(host=self.server, **kwargs)
                # both calls force a round trip, surfacing connection problems
                # right here instead of at first use
                self.client.server_info()
                self.client.admin.command('ping')
        except ConnectionFailure as ex:
            msg = "Could not connect to MongoDB server: %s. Server not available. \n"
            msg += "Giving up Now."
            self.logger.error(msg, self.server)
            raise ex from None
        except Exception as ex:
            msg = "Could not connect to MongoDB server: %s. Due to unknown reason: %s\n"
            msg += "Giving up Now."
            self.logger.error(msg, self.server, str(ex))
            raise ex from None

        self.create = create
        self.testIndexes = testIndexes
        self.dbName = database
        self.collections = collections or []
        self._dbConnect(database)

        if self.create and self.collections:
            for collection in self.collections:
                self._collCreate(collection, database)

        if self.testIndexes and self.collections:
            for collection in self.collections:
                self._indexTest(collection[0], collection[1])

    def _indexTest(self, collection, index):
        # TODO: verify the given index exists on the collection and create it
        # if missing
        pass

    def _collTest(self, coll, db):
        # TODO: check collection presence, e.g. via
        # self.client[db].list_collection_names()
        pass

    def collCreate(self, coll):
        """
        A public method for _collCreate
        """
        # fix: the database name is stored in self.dbName; self.database was
        # never set, so this call used to raise AttributeError
        self._collCreate(coll, self.dbName)

    def _collCreate(self, coll, db):
        """
        A function used to explicitly create a collection with the relevant
        indexes - used to avoid the Lazy Creating from MongoDB and eventual issues
        in case we end up with no indexed collection, especially ones missing
        the (`unique` index parameter)
        :coll: A tuple describing one collection with indexes -
               The first element is considered to be the collection name, and all
               the rest of the elements are considered to be indexes.
               The indexes must be of type IndexModel. See pymongo documentation:
               https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.create_index
        :db: The database name for the collection
        """
        collName = coll[0]
        collIndexes = list(coll[1:])
        try:
            self.client[db].create_collection(collName)
        except errors.CollectionInvalid:
            # this error is thrown in case of an already existing collection
            # (fix: format the collection name, not the whole tuple)
            msg = "Collection '{}' Already exists in database '{}'".format(collName, db)
            self.logger.warning(msg)

        if collIndexes:
            for index in collIndexes:
                if not isinstance(index, IndexModel):
                    msg = "ERR: Bad Index type for collection %s" % collName
                    # fix: the message used to be built but neither logged nor
                    # attached to the raised exception
                    self.logger.error(msg)
                    raise errors.InvalidName(msg)
            try:
                self.client[db][collName].create_indexes(collIndexes)
            except Exception as ex:
                msg = "Failed to create indexes on collection: %s\n%s" % (collName, str(ex))
                self.logger.error(msg)
                raise ex

    def _dbTest(self, db):
        """
        Tests database connection.
        """
        # Test connection (from mongoDB documentation):
        # https://api.mongodb.com/python/3.4.0/api/pymongo/mongo_client.html
        try:
            # The 'ismaster' command is cheap and does not require auth.
            self.client.admin.command('ismaster')
        except errors.ConnectionFailure as ex:
            msg = "Server not available: %s" % str(ex)
            self.logger.error(msg)
            raise ex

        # Test for database existence
        if db not in self.client.list_database_names():
            msg = "Missing MongoDB databases: %s" % db
            self.logger.error(msg)
            # fix: carry the message on the exception instead of raising the
            # bare exception class
            raise errors.InvalidName(msg)

    def _dbCreate(self, db):
        # creating an empty collection in order to create the database
        _initColl = self.client[db].create_collection('_initCollection')
        _initColl.insert_one({})
        # NOTE: never delete the _initCollection if you want the database to persist
        # self.client[db].drop_collection('_initCollection')

    def dbConnect(self):
        """
        A public method for _dbConnect
        """
        # fix: use self.dbName; self.database was never set on this object
        self._dbConnect(self.dbName)

    def _dbConnect(self, db):
        """
        The function to be used for the initial database connection creation and testing
        """
        try:
            # expose the database handle as an attribute named after the db
            setattr(self, db, self.client[db])
            if not self.mockMongoDB:
                self._dbTest(db)
        except errors.ConnectionFailure as ex:
            msg = "Could not connect to MongoDB server for database: %s\n%s\n" % (db, str(ex))
            msg += "Giving up Now."
            self.logger.error(msg)
            raise ex
        except errors.InvalidName as ex:
            msg = "Could not connect to a missing MongoDB databases: %s\n%s" % (db, str(ex))
            self.logger.error(msg)
            if self.create:
                msg = "Trying to create: %s" % db
                # informational message - log at warning level (was error)
                self.logger.warning(msg)
                try:
                    self._dbCreate(db)
                except Exception as exc:
                    msg = "Could not create MongoDB databases: %s\n%s\n" % (db, str(exc))
                    msg += "Giving up Now."
                    self.logger.error(msg)
                    raise exc
                try:
                    self._dbTest(db)
                except Exception as exc:
                    msg = "Second failure while testing %s\n%s\n" % (db, str(exc))
                    msg += "Giving up Now."
                    self.logger.error(msg)
                    raise exc
                msg = "Database %s successfully created" % db
                # success message - log at info level (was error)
                self.logger.info(msg)
        except Exception as ex:
            msg = "General Exception while trying to connect to : %s\n%s" % (db, str(ex))
            self.logger.error(msg)
            raise ex
import logging
import time
from WMCore.DataStructs.WMObject import WMObject
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class Transaction(WMObject):
    """
    Thin wrapper around a DBInterface connection that keeps an explicit
    transaction open across multiple processData calls.
    """
    dbi = None

    def __init__(self, dbinterface=None):
        """
        Get the connection from the DBInterface and open a new transaction on it
        """
        self.dbi = dbinterface
        self.conn = None
        self.transaction = None

    def begin(self):
        """
        Lazily acquire a connection (re-acquiring it when the pooled one was
        closed underneath us) and open a transaction on it, if none is active.
        """
        if self.conn is None:
            self.conn = self.dbi.connection()
        if self.conn.closed:
            self.conn = self.dbi.connection()
        if self.transaction is None:
            self.transaction = self.conn.begin()
        return

    def processData(self, sql, binds=None):
        """
        Propagates the request to the proper dbcore backend,
        and performs checks for lost (or closed) connection.

        :param sql: SQL statement(s) to run
        :param binds: bind variables; defaults to an empty dict (a None
            sentinel replaces the former mutable default argument)
        """
        if binds is None:
            binds = {}
        return self.dbi.processData(sql, binds, conn=self.conn,
                                    transaction=True)

    def commit(self):
        """
        Commit the transaction and return the connection to the pool
        """
        if self.transaction is not None:
            self.transaction.commit()
        if self.conn is not None:
            self.conn.close()
        self.conn = None
        self.transaction = None

    def rollback(self):
        """
        To be called if there is an exception and you want to roll back the
        transaction and return the connection to the pool
        """
        if self.transaction:
            self.transaction.rollback()
        if self.conn:
            self.conn.close()
        self.conn = None
        self.transaction = None
        return

    def rollbackForError(self):
        """
        This is called when handling a major exception. This is because sometimes
        you can end up in a situation where the transaction appears open, but is not. In
        this case, calling a rollback on the transaction will cause an exception, which
        then destroys all logging and shutdown of the actual code.
        Use only in components.
        """
        try:
            self.rollback()
        except Exception:
            # deliberately swallowed: a rollback failure during emergency
            # shutdown must never mask the original error (narrowed from a
            # bare except so KeyboardInterrupt/SystemExit still propagate)
            pass
        return
from copy import copy
from Utils.IteratorTools import grouper
import WMCore.WMLogging
from WMCore.DataStructs.WMObject import WMObject
from WMCore.Database.ResultSet import ResultSet
class DBInterface(WMObject):
    """
    Base class for doing SQL operations using a SQLAlchemy engine, or
    pre-existing connection.

    processData will take a (list of) sql statements and a (list of)
    bind variable dictionaries and run the statements on the DB. If
    necessary it will substitute binds into the sql (MySQL).

    TODO:
        Add in some suitable exceptions in one or two places
        Test the hell out of it
        Support executemany()
    """
    logger = None
    engine = None

    def __init__(self, logger, engine):
        self.logger = logger
        self.logger.info("Instantiating base WM DBInterface")
        self.engine = engine
        # upper limit of bind dictionaries shipped per executemany round trip
        self.maxBindsPerQuery = 500

    def buildbinds(self, sequence, thename, therest=None):
        """
        Build a list of binds. Can be used recursively, e.g.:
        buildbinds(file, 'file', buildbinds(pnn, 'location'), {'lumi':123})

        :param sequence: values to bind under *thename*
        :param thename: bind variable name assigned to each value
        :param therest: list of partial bind dicts to combine with; defaults
            to [{}] (a None sentinel replaces the old mutable default)
        :return: list of bind dictionaries, one per (value, partial) pair

        TODO: replace with an appropriate map function
        """
        if therest is None:
            therest = [{}]
        binds = []
        for r in sequence:
            for i in self.makelist(therest):
                thebind = copy(i)
                thebind[thename] = r
                binds.append(thebind)
        return binds

    def executebinds(self, s=None, b=None, connection=None,
                     returnCursor=False):
        """
        _executebinds_

        Execute a single statement with at most one set of binds.
        returns a list of sqlalchemy.engine.base.ResultProxy objects
        """
        if b is None:
            resultProxy = connection.execute(s)
        else:
            resultProxy = connection.execute(s, b)
        if returnCursor:
            return resultProxy
        result = ResultSet()
        result.add(resultProxy)
        resultProxy.close()
        return result

    def executemanybinds(self, s=None, b=None, connection=None,
                         returnCursor=False):
        """
        _executemanybinds_

        b is a list of dictionaries for the binds, e.g.:
        b = [ {'bind1':'value1a', 'bind2': 'value2a'},
              {'bind1':'value1b', 'bind2': 'value2b'} ]
        see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/

        Can't executemany() selects - so do each combination of binds here instead.
        This will return a list of sqlalchemy.engine.base.ResultProxy object's
        one for each set of binds.

        returns a list of sqlalchemy.engine.base.ResultProxy objects
        """
        s = s.strip()
        if s.lower().startswith('select'):
            # selects cannot go through executemany(): run each set of binds
            # individually and collect the results
            if returnCursor:
                result = []
                for bind in b:
                    result.append(connection.execute(s, bind))
            else:
                result = ResultSet()
                for bind in b:
                    resultproxy = connection.execute(s, bind)
                    result.add(resultproxy)
                    resultproxy.close()
            return self.makelist(result)
        # inserting or updating many: a single executemany() call suffices
        result = connection.execute(s, b)
        return self.makelist(result)

    def connection(self):
        """
        Return a connection to the engine (from the connection pool)
        """
        return self.engine.connect()

    def processData(self, sqlstmt, binds=None, conn=None,
                    transaction=False, returnCursor=False):
        """
        Execute one or more SQL statements with their bind variables.

        set conn if you already have an active connection to reuse
        set transaction = True if you already have an active transaction

        :param binds: a bind dict or a list of bind dicts; defaults to {}
            (a None sentinel replaces the former mutable default argument)
        """
        if binds is None:
            binds = {}
        connection = None
        try:
            if not conn:
                connection = self.connection()
            else:
                connection = conn

            result = []
            # Can take either a single statement or a list of statements and binds
            sqlstmt = self.makelist(sqlstmt)
            binds = self.makelist(binds)
            if len(sqlstmt) > 0 and (len(binds) == 0 or binds[0] == {} or binds[0] is None):
                # Should only be run by create statements
                if not transaction:
                    trans = connection.begin()

                for i in sqlstmt:
                    r = self.executebinds(i, connection=connection,
                                          returnCursor=returnCursor)
                    result.append(r)

                if not transaction:
                    trans.commit()
            elif len(binds) > len(sqlstmt) and len(sqlstmt) == 1:
                # Run single SQL statement for a list of binds - use execute_many()
                if not transaction:
                    trans = connection.begin()

                # chunk the binds so no single call ships more than
                # self.maxBindsPerQuery dictionaries
                for subBinds in grouper(binds, self.maxBindsPerQuery):
                    result.extend(self.executemanybinds(sqlstmt[0], subBinds,
                                                        connection=connection, returnCursor=returnCursor))

                if not transaction:
                    trans.commit()
            elif len(binds) == len(sqlstmt):
                # Run a list of SQL for a list of binds
                if not transaction:
                    trans = connection.begin()

                for i, s in enumerate(sqlstmt):
                    b = binds[i]
                    r = self.executebinds(s, b, connection=connection,
                                          returnCursor=returnCursor)
                    result.append(r)

                if not transaction:
                    trans.commit()
            else:
                self.logger.exception(
                    "DBInterface.processData Nothing executed, problem with your arguments")
                self.logger.exception(
                    "DBInterface.processData SQL = %s" % sqlstmt)
                WMCore.WMLogging.sqldebug('DBInterface.processData sql is %s items long' % len(sqlstmt))
                WMCore.WMLogging.sqldebug('DBInterface.processData binds are %s items long' % len(binds))
                sameLength = len(binds) == len(sqlstmt)
                WMCore.WMLogging.sqldebug('DBInterface.processData are binds and sql same length? : %s' % (sameLength))
                WMCore.WMLogging.sqldebug('sql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
                                          (sqlstmt, binds, connection, transaction))
                WMCore.WMLogging.sqldebug('type check:\nsql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
                                          (type(sqlstmt), type(binds), type(connection), type(transaction)))
                raise Exception("""DBInterface.processData Nothing executed, problem with your arguments
                Probably mismatched sizes for sql (%i) and binds (%i)""" % (len(sqlstmt), len(binds)))
        finally:
            if not conn and connection is not None:
                connection.close()  # Return connection to the pool

        return result
import copy
from WMCore.Database.DBCore import DBInterface
from WMCore.Database.ResultSet import ResultSet
def bindVarCompare(a):
    """
    _bindVarCompare_

    Sort key for bind variables, which are (name, position-in-query) tuples:
    order them by their position in the query.
    """
    _, position = a
    return position
def stringLengthCompare(a):
    """
    _stringLengthCompare_

    Sort key returning the length of *a*. Used with reverse=True to order
    strings from longest to shortest.
    """
    return len(a)
class MySQLInterface(DBInterface):
    # MySQL-specific DBInterface: rewrites ':name' style bind variables into
    # the '%s' placeholders (and positional bind tuples) the MySQL DB-API
    # driver expects.

    def substitute(self, origSQL, origBindsList):
        """
        _substitute_

        Transform as set of bind variables from a list of dictionaries to a list
        of tuples:
          b = [ {'bind1':'value1a', 'bind2': 'value2a'},
                {'bind1':'value1b', 'bind2': 'value2b'} ]
        Will be transformed into:
          b = [ ('value1a', 'value2a'), ('value1b', 'value2b')]

        Don't need to substitute in the binds as executemany does that
        internally.  But the sql will also need to be reformatted, such that
        :bind_name becomes %s.
        See: http://www.devshed.com/c/a/Python/MySQL-Connectivity-With-Python/5/
        """
        if origBindsList == None:
            return origSQL, None

        origBindsList = self.makelist(origBindsList)
        # all bind dicts share the same keys; the first one is representative
        origBind = origBindsList[0]
        bindVarPositionList = []
        updatedSQL = copy.copy(origSQL)

        # We process bind variables from longest to shortest to avoid a shorter
        # bind variable matching a longer one. For example if we have two bind
        # variables: RELEASE_VERSION and RELEASE_VERSION_ID the former will
        # match against the latter, causing problems. We'll sort the variable
        # names by length to guard against this.
        bindVarNames = list(origBind)
        bindVarNames.sort(key=stringLengthCompare, reverse=True)

        # positions already claimed by a (longer) bind name; a shorter name
        # matching at the same offset must not be recorded twice
        bindPositions = {}
        for bindName in bindVarNames:
            # first pass: record every position of :bindName in the original SQL
            searchPosition = 0
            while True:
                bindPosition = origSQL.lower().find(":%s" % bindName.lower(),
                                                    searchPosition)
                if bindPosition == -1:
                    break
                if bindPosition not in bindPositions:
                    bindPositions[bindPosition] = 0
                    bindVarPositionList.append((bindName, bindPosition))
                searchPosition = bindPosition + 1
            # second pass: splice '%s' in place of each ':bindName' occurrence;
            # the search restarts from 0 because every replacement shortens the
            # string, and it terminates once no occurrence is left
            searchPosition = 0
            while True:
                bindPosition = updatedSQL.lower().find(":%s" % bindName.lower(),
                                                       searchPosition)
                if bindPosition == -1:
                    break
                left = updatedSQL[0:bindPosition]
                right = updatedSQL[bindPosition + len(bindName) + 1:]
                updatedSQL = left + "%s" + right

        # order the (name, position) pairs by query position so the generated
        # tuples line up with the '%s' placeholders
        bindVarPositionList.sort(key=bindVarCompare)

        mySQLBindVarsList = []
        for origBind in origBindsList:
            mySQLBindVars = []
            for bindVarPosition in bindVarPositionList:
                mySQLBindVars.append(origBind[bindVarPosition[0]])
            mySQLBindVarsList.append(tuple(mySQLBindVars))

        return (updatedSQL, mySQLBindVarsList)

    def executebinds(self, s = None, b = None, connection = None,
                     returnCursor = False):
        """
        _executebinds_

        Execute a SQL statement that has a single set of bind variables.
        Transform the bind variables into the format that MySQL expects.
        """
        s, b = self.substitute(s, b)
        return DBInterface.executebinds(self, s, b, connection, returnCursor)

    def executemanybinds(self, s = None, b = None, connection = None,
                         returnCursor = False):
        """
        _executemanybinds_

        Execute a SQL statement that has multiple sets of bind variables.
        Transform the bind variables into the format that MySQL expects.
        """
        newsql, binds = self.substitute(s, b)
        return DBInterface.executemanybinds(self, newsql, binds, connection,
                                            returnCursor)
from __future__ import print_function
from builtins import str, bytes, int
from future.utils import viewitems
from Utils.PythonVersion import PY2
import sys
import types
class _EmptyClass(object):
    # Bare placeholder class; presumably used to rebuild arbitrary instances
    # during unthunking (that code is not visible in this part of the file) --
    # TODO confirm.
    pass
class JSONThunker(object):
"""
_JSONThunker_
Converts an arbitrary object to <-> from a jsonable object.
Will, for the most part "do the right thing" about various instance objects
by storing their class information along with their data in a dict. Handles
a recursion limit to prevent infinite recursion.
self.passThroughTypes - stores a list of types that should be passed
through unchanged to the JSON parser
self.blackListedModules - a list of modules that should not be stored in
the JSON.
"""
    def __init__(self):
        """
        Set up the type tables that drive the thunking process; see the class
        docstring for the meaning of each collection.
        """
        # types handed to the JSON parser unchanged
        self.passThroughTypes = (type(None),
                                 bool,
                                 int,
                                 float,
                                 complex,
                                 str,
                                 bytes,
                                 )
        # objects that inherit from dict should be treated as a dict
        # they don't store their data in __dict__. There was enough
        # of those classes that it warranted making a special case
        self.dictSortOfObjects = (('WMCore.Datastructs.Job', 'Job'),
                                  ('WMCore.WMBS.Job', 'Job'),
                                  ('WMCore.Database.CMSCouch', 'Document'))
        # ditto above, but for lists
        self.listSortOfObjects = (('WMCore.DataStructs.JobPackage', 'JobPackage'),
                                  ('WMCore.WMBS.JobPackage', 'JobPackage'),)
        # recursion counters keyed by id(); reset at every top-level thunk()
        self.foundIDs = {}
        # modules we don't want JSONed
        self.blackListedModules = ('sqlalchemy.engine.threadlocal',
                                   'WMCore.Database.DBCore',
                                   'logging',
                                   'WMCore.DAOFactory',
                                   'WMCore.WMFactory',
                                   'WMFactory',
                                   'WMCore.Configuration',
                                   'WMCore.Database.Transaction',
                                   'threading',
                                   'datetime')
def checkRecursion(self, data):
"""
handles checking for infinite recursion
"""
if id(data) in self.foundIDs:
if self.foundIDs[id(data)] > 5:
self.unrecurse(data)
return "**RECURSION**"
else:
self.foundIDs[id(data)] += 1
return data
else:
self.foundIDs[id(data)] = 1
return data
def unrecurse(self, data):
"""
backs off the recursion counter if we're returning from _thunk
"""
try:
self.foundIDs[id(data)] -= 1
except:
print("Could not find count for id %s of type %s data %s" % (id(data), type(data), data))
raise
    def checkBlackListed(self, data):
        """
        checks to see if a given object is from a blacklisted module

        Returns *data* unchanged when it is allowed, or a descriptive string
        in its place when its module is blacklisted.
        """
        try:
            # special case: rebrand CMSCouch Documents as plain dicts
            # (type({}) is dict) -- presumably so they are serialized as
            # mappings rather than as instances; TODO confirm
            if data.__class__.__module__ == 'WMCore.Database.CMSCouch' and data.__class__.__name__ == 'Document':
                data.__class__ = type({})
                return data
            if data.__class__.__module__ in self.blackListedModules:
                return "Blacklisted JSON object: module %s, name %s, str() %s" % \
                       (data.__class__.__module__, data.__class__.__name__, str(data))
            else:
                return data
        except Exception:
            # objects for which the attribute probing fails are passed
            # through unchanged
            return data
def thunk(self, toThunk):
"""
Thunk - turns an arbitrary object into a JSONable object
"""
self.foundIDs = {}
data = self._thunk(toThunk)
return data
def unthunk(self, data):
"""
unthunk - turns a previously 'thunked' object back into a python object
"""
return self._unthunk(data)
def handleSetThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
tempDict = {'thunker_encoded_json': True, 'type': 'set'}
tempDict['set'] = self._thunk(list(toThunk))
self.unrecurse(toThunk)
return tempDict
def handleListThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
for k, v in enumerate(toThunk):
toThunk[k] = self._thunk(v)
self.unrecurse(toThunk)
return toThunk
def handleDictThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
special = False
tmpdict = {}
for k, v in viewitems(toThunk):
if type(k) == type(int):
special = True
tmpdict['_i:%s' % k] = self._thunk(v)
elif type(k) == type(float):
special = True
tmpdict['_f:%s' % k] = self._thunk(v)
else:
tmpdict[k] = self._thunk(v)
if special:
toThunk['thunker_encoded_json'] = self._thunk(True)
toThunk['type'] = self._thunk('dict')
toThunk['dict'] = tmpdict
else:
toThunk.update(tmpdict)
self.unrecurse(toThunk)
return toThunk
    def handleObjectThunk(self, toThunk):
        """
        Thunk a generic object: blacklisted objects become strings,
        objects with __to_json__ serialise themselves, dict/list
        subclasses are special-cased, and anything else is encoded from
        its __dict__ under a "module.Class" type tag.
        """
        toThunk = self.checkRecursion(toThunk)
        toThunk = self.checkBlackListed(toThunk)
        if isinstance(toThunk, (str, bytes)):
            # things that got blacklisted
            return toThunk
        if hasattr(toThunk, '__to_json__'):
            # Use classes own json thunker
            toThunk2 = toThunk.__to_json__(self)
            self.unrecurse(toThunk)
            return toThunk2
        elif isinstance(toThunk, dict):
            # A dict subclass (e.g. a CMSCouch Document after checkBlackListed).
            toThunk2 = self.handleDictObjectThunk(toThunk)
            self.unrecurse(toThunk)
            return toThunk2
        elif isinstance(toThunk, list):
            # a mother thunking list
            toThunk2 = self.handleListObjectThunk(toThunk)
            self.unrecurse(toThunk)
            return toThunk2
        else:
            try:
                # Generic object: encode its __dict__ under a type tag so
                # _unthunk can resurrect an instance of the same class.
                thunktype = '%s.%s' % (toThunk.__class__.__module__,
                                       toThunk.__class__.__name__)
                tempDict = {'thunker_encoded_json': True, 'type': thunktype}
                tempDict[thunktype] = self._thunk(toThunk.__dict__)
                self.unrecurse(toThunk)
                return tempDict
            except Exception as e:
                # Best effort: record the failure instead of aborting the thunk.
                tempDict = {'json_thunk_exception_': "%s" % e}
                self.unrecurse(toThunk)
                return tempDict
    def handleDictObjectThunk(self, data):
        """
        Encode a dict-subclass object: the mapping payload goes under the
        "module.Class" key and the instance attributes (``__dict__``) are
        laid alongside at the top level of the envelope.
        """
        thunktype = '%s.%s' % (data.__class__.__module__,
                               data.__class__.__name__)
        tempDict = {'thunker_encoded_json': True,
                    'is_dict': True,
                    'type': thunktype,
                    thunktype: {}}
        # NOTE(review): attribute names from __dict__ could collide with the
        # envelope keys above ('type', 'is_dict', ...) -- confirm callers
        # never store such attributes.
        for k, v in viewitems(data.__dict__):
            tempDict[k] = self._thunk(v)
        for k, v in viewitems(data):
            tempDict[thunktype][k] = self._thunk(v)
        return tempDict
    def handleDictObjectUnThunk(self, value, data):
        """
        Inverse of ``handleDictObjectThunk``: refill the mapping payload
        into ``value`` and the remaining top-level keys into its __dict__.
        """
        data.pop('thunker_encoded_json', False)
        data.pop('is_dict', False)
        thunktype = data.pop('type', False)
        for k, v in viewitems(data):
            if k == thunktype:
                # The mapping payload itself.
                for k2, v2 in viewitems(data[thunktype]):
                    value[k2] = self._unthunk(v2)
            else:
                # An instance attribute stored alongside the payload.
                value.__dict__[k] = self._unthunk(v)
        return value
def handleListObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_list': True,
'type': thunktype,
thunktype: []}
for k, v in enumerate(data):
tempDict['thunktype'].append(self._thunk(v))
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
return tempDict
def handleListObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_list', False)
thunktype = data.pop('type')
for k, v in viewitems(data[thunktype]):
setattr(value, k, self._unthunk(v))
for k, v in viewitems(data):
if k == thunktype:
continue
value.__dict__ = self._unthunk(v)
return value
    def _thunk(self, toThunk):
        """
        helper function for thunk, does the actual work

        Dispatches on the exact type of ``toThunk``: JSON-native values
        pass through, containers are handled in place, functions become a
        placeholder string, and everything else goes through the generic
        object encoder.
        """
        if isinstance(toThunk, self.passThroughTypes):
            return toThunk
        elif type(toThunk) is list:
            return self.handleListThunk(toThunk)
        elif type(toThunk) is dict:
            return self.handleDictThunk(toThunk)
        elif type(toThunk) is set:
            return self.handleSetThunk(toThunk)
        elif type(toThunk) is types.FunctionType:
            # Functions cannot be serialised; leave a marker instead.
            self.unrecurse(toThunk)
            return "function reference"
        elif isinstance(toThunk, object):
            return self.handleObjectThunk(toThunk)
        else:
            # NOTE(review): everything is an instance of object, so this
            # branch looks unreachable; kept as a safety net.
            self.unrecurse(toThunk)
            raise RuntimeError(type(toThunk))
    def _unthunk(self, jsondata):
        """
        _unthunk - does the actual work for unthunk

        Dicts carrying the 'thunker_encoded_json' marker are decoded back
        into sets, numeric-keyed dicts or class instances; plain dicts are
        decoded recursively; everything else is returned untouched.
        """
        if PY2 and type(jsondata) is str:
            return jsondata.encode("utf-8")
        if type(jsondata) is dict:
            if 'thunker_encoded_json' in jsondata:
                # we've got a live one...
                if jsondata['type'] == 'set':
                    newSet = set()
                    for i in self._unthunk(jsondata['set']):
                        newSet.add(self._unthunk(i))
                    return newSet
                if jsondata['type'] == 'dict':
                    # We have a "special" dict
                    data = {}
                    for k, v in viewitems(jsondata['dict']):
                        tmp = self._unthunk(v)
                        # '_i:'/'_f:' prefixes restore numeric key types;
                        # lstrip works here because the digits/sign that
                        # follow are not in the stripped character set.
                        if k.startswith('_i:'):
                            data[int(k.lstrip('_i:'))] = tmp
                        elif k.startswith('_f:'):
                            data[float(k.lstrip('_f:'))] = tmp
                        else:
                            data[k] = tmp
                    return data
                else:
                    # spawn up an instance.. good luck
                    # here be monsters
                    # inspired from python's pickle code
                    ourClass = self.getThunkedClass(jsondata)
                    # Start from an empty shell and rebind its class;
                    # fall back to calling the constructor if that fails.
                    value = _EmptyClass()
                    if hasattr(ourClass, '__from_json__'):
                        # Use classes own json loader
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = ourClass.__from_json__(value, jsondata, self)
                    elif 'thunker_encoded_json' in jsondata and 'is_dict' in jsondata:
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = self.handleDictObjectUnThunk(value, jsondata)
                    elif 'thunker_encoded_json' in jsondata:
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = self.handleListObjectUnThunk(value, jsondata)
                    else:
                        raise RuntimeError('Could not unthunk a class. Code to try was removed because it had errors.')
                    return value
            else:
                # Plain dict: decode the values recursively.
                data = {}
                for k, v in viewitems(jsondata):
                    data[k] = self._unthunk(v)
                return data
        else:
            return jsondata
@staticmethod
def getThunkedClass(jsondata):
"""
Work out the class from it's thunked json representation
"""
module = jsondata['type'].rsplit('.', 1)[0]
name = jsondata['type'].rsplit('.', 1)[1]
if (module == 'WMCore.Services.Requests') and (name == JSONThunker):
raise RuntimeError("Attempted to unthunk a JSONThunker..")
__import__(module)
mod = sys.modules[module]
ourClass = getattr(mod, name)
return ourClass | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/Wrappers/JsonWrapper/JSONThunker.py | 0.443118 | 0.360208 | JSONThunker.py | pypi |
from builtins import next, str, object
from future.utils import viewitems
import xml.parsers.expat
class Node(object):
    """
    _Node_

    Minimal DOM-like container used while parsing the XML report: holds an
    element's name, its stringified attributes, its text content and the
    list of child nodes, without all the whitespace guff.
    """
    def __init__(self, name, attrs):
        self.name = str(name)
        self.attrs = {}
        self.text = None
        # Stringify both keys and values so downstream code sees plain str.
        for key, value in attrs.items():
            self.attrs[str(key)] = str(value)
        self.children = []

    def __str__(self):
        # One line per node, children rendered recursively below it.
        pieces = [" %s %s \"%s\"\n" % (self.name, self.attrs, self.text)]
        pieces.extend(str(child) for child in self.children)
        return "".join(pieces)
def coroutine(func):
    """
    _coroutine_

    Decorator that primes a generator-based coroutine: the generator is
    advanced to its first ``yield`` immediately on creation so callers can
    ``send`` into it right away.
    """
    def primed(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen
    return primed
def xmlFileToNode(reportFile):
    """
    _xmlFileToNode_

    Use expat and the build coroutine to parse the XML file and build
    a node structure; returns the synthetic "JobReports" root Node.
    """
    node = Node("JobReports", {})
    # Fix: the file handle used to be opened inline and never closed; a
    # context manager guarantees release even when parsing raises.
    with open(reportFile, 'rb') as xmlFile:
        expat_parse(xmlFile, build(node))
    return node
def expat_parse(f, target):
    """
    _expat_parse_

    Drive an expat parser over the open binary file ``f``, forwarding
    events into the ``target`` coroutine as ('start', (name, attrs)),
    ('end', name) and ('text', data) tuples.
    """
    parser = xml.parsers.expat.ParserCreate()
    # Coalesce character data so each text node arrives as one event.
    parser.buffer_text = True

    def onStart(name, attrs):
        target.send(('start', (name, attrs)))

    def onEnd(name):
        target.send(('end', name))

    def onText(data):
        target.send(('text', data))

    parser.StartElementHandler = onStart
    parser.EndElementHandler = onEnd
    parser.CharacterDataHandler = onText
    parser.ParseFile(f)
@coroutine
def build(topNode):
    """
    _build_

    Node structure builder that is fed from the expat_parse method

    Keeps a stack of currently-open elements; character data is buffered
    and attached (stripped) to the element when its end tag arrives.
    """
    nodeStack = [topNode]
    charCache = []
    while True:
        event, value = (yield)
        if event == "start":
            # NOTE(review): resetting the cache here drops any text seen
            # before a child element (mixed content) -- apparently fine
            # for job reports, but worth confirming.
            charCache = []
            newnode = Node(value[0], value[1])
            nodeStack[-1].children.append(newnode)
            nodeStack.append(newnode)
        elif event == "text":
            charCache.append(value)
        else: # end
            # Attach accumulated text to the closing element and pop it.
            nodeStack[-1].text = str(''.join(charCache)).strip()
            nodeStack.pop()
            charCache = []
from __future__ import print_function, division
from builtins import str, range
import math
import decimal
import logging
from WMCore.WMException import WMException
class MathAlgoException(WMException):
    """
    Some simple math algo exceptions

    Raised by the helpers in this module when given non-numerical or
    otherwise unusable (NaN/Inf) input.
    """
    pass
def getAverageStdDev(numList):
    """
    _getAverageStdDev_

    Given a list, calculate both the average and the standard deviation,
    ignoring NaN/Inf entries.

    :returns: tuple (average, stdDev); (0.0, 0.0) for an empty list or a
        list containing only NaN/Inf values.
    :raises MathAlgoException: for non-numerical entries.
    """
    # Fix: this guard used to read ``len(numList) < 0``, which can never be
    # true; an empty list should short-circuit here.
    if not numList:
        # Nothing to do here
        return 0.0, 0.0

    total = 0.0
    average = 0.0
    stdBase = 0.0

    # Assemble the average, skipping NaN/Inf entries.
    skipped = 0
    for value in numList:
        try:
            if math.isnan(value) or math.isinf(value):
                skipped += 1
                continue
            else:
                total += value
        except TypeError:
            msg = "Attempted to take average of non-numerical values.\n"
            msg += "Expected int or float, got %s: %s" % (value.__class__, value)
            logging.error(msg)
            logging.debug("FullList: %s", numList)
            raise MathAlgoException(msg)

    length = len(numList) - skipped
    if length < 1:
        # Every usable entry was NaN/Inf.
        return average, total

    average = total / length

    for value in numList:
        # Fix: NaN/Inf entries were excluded from the mean but not from the
        # variance, poisoning stdBase with NaN; skip them here as well.
        if math.isnan(value) or math.isinf(value):
            continue
        tmpValue = value - average
        stdBase += (tmpValue * tmpValue)

    stdDev = math.sqrt(stdBase / length)

    if math.isnan(average) or math.isinf(average):
        average = 0.0
    # Fix: the second clause used to re-test ``average``; it should reject
    # an infinite standard deviation.
    if math.isnan(stdDev) or math.isinf(stdDev) or not decimal.Decimal(str(stdDev)).is_finite():
        stdDev = 0.0
    if not isinstance(stdDev, (int, float)):
        stdDev = 0.0

    return average, stdDev
def createHistogram(numList, nBins, limit):
    """
    _createHistogram_

    Create a histogram proxy (a list of bins) for a
    given list of numbers

    Values within ``limit`` standard deviations of the mean are split into
    ``nBins`` equal-width 'standard' bins; outliers are summarised in
    single 'underflow'/'overflow' pseudo-bins.
    """
    average, stdDev = getAverageStdDev(numList = numList)

    underflow = []
    overflow = []
    histEvents = []
    histogram = []
    for value in numList:
        if math.fabs(average - value) <= limit * stdDev:
            # Then we counted this event
            histEvents.append(value)
        elif average < value:
            overflow.append(value)
        elif average > value:
            underflow.append(value)

    if len(underflow) > 0:
        binAvg, binStdDev = getAverageStdDev(numList=underflow)
        histogram.append({'type': 'underflow',
                          'average': binAvg,
                          'stdDev': binStdDev,
                          'nEvents': len(underflow)})
    if len(overflow) > 0:
        binAvg, binStdDev = getAverageStdDev(numList=overflow)
        histogram.append({'type': 'overflow',
                          'average': binAvg,
                          'stdDev': binStdDev,
                          'nEvents': len(overflow)})
    if len(histEvents) < 1:
        # Nothing to do?
        return histogram

    histEvents.sort()
    upperBound = max(histEvents)
    lowerBound = min(histEvents)
    if lowerBound == upperBound:
        # This is a problem
        # Degenerate case: widen the range artificially so one bin fits.
        logging.debug("Only one value in the histogram!")
        nBins = 1
        upperBound = upperBound + 1
        lowerBound = lowerBound - 1
    binSize = (upperBound - lowerBound)/nBins
    binSize = floorTruncate(binSize)

    for x in range(nBins):
        lowerEdge = floorTruncate(lowerBound + (x * binSize))
        histogram.append({'type': 'standard',
                          'lowerEdge': lowerEdge,
                          'upperEdge': lowerEdge + binSize,
                          'average': 0.0,
                          'stdDev': 0.0,
                          'nEvents': 0})

    # NOTE(review): bin edges are inclusive on both sides, so a value equal
    # to a shared edge is counted in two adjacent bins -- confirm intended.
    for bin_ in histogram:
        if bin_['type'] != 'standard':
            continue
        binList = []
        for value in histEvents:
            if value >= bin_['lowerEdge'] and value <= bin_['upperEdge']:
                # Then we're in the bin
                binList.append(value)
            elif value > bin_['upperEdge']:
                # Because this is a sorted list we are now out of the bin range
                # Calculate our values and break
                break
            else:
                continue
        # If we get here, it's because we're out of values in the bin
        # Time to do some math
        if len(binList) < 1:
            # Nothing to do here, leave defaults
            continue
        binAvg, binStdDev = getAverageStdDev(numList=binList)
        bin_['average'] = binAvg
        bin_['stdDev'] = binStdDev
        bin_['nEvents'] = len(binList)

    return histogram
def floorTruncate(value, precision=3):
    """
    _floorTruncate_

    Truncate ``value`` to ``precision`` decimal places, always rounding
    toward negative infinity so histogram bin edges stay at or below the
    values they are meant to contain.
    """
    scale = math.pow(10, precision)
    truncated = math.floor(value * scale)
    return truncated / scale
def sortDictionaryListByKey(dictList, key, reverse=False):
    """
    _sortDictionaryListByKey_

    Return ``dictList`` sorted by the numeric value stored under ``key``.

    Entries missing the key sort as 0.0 rather than raising, because this
    is used on performance histograms and not every histogram carries
    every key.
    """
    def numericValue(entry):
        return float(entry.get(key, 0.0))
    return sorted(dictList, key=numericValue, reverse=reverse)
def getLargestValues(dictList, key, n=1):
    """
    _getLargestValues_

    Return the ``n`` entries of ``dictList`` with the largest numeric
    value under ``key`` (missing keys count as 0.0), biggest first.
    """
    ranked = sorted(dictList, key=lambda entry: float(entry.get(key, 0.0)), reverse=True)
    return ranked[:n]
def validateNumericInput(value):
    """
    _validateNumericInput_

    Return True if ``value`` is (convertible to) a finite float, False
    otherwise. Never raises.
    """
    try:
        # Fix: the conversion used to sit outside the try block, so a
        # non-numeric string ('abc') escaped as an uncaught ValueError
        # instead of returning False.
        value = float(value)
        if math.isnan(value) or math.isinf(value):
            return False
    except (TypeError, ValueError):
        return False
    return True
def calculateRunningAverageAndQValue(newPoint, n, oldM, oldQ):
    """
    _calculateRunningAverageAndQValue_

    Use the algorithm described in:
    Donald E. Knuth (1998). The Art of Computer Programming, volume 2: Seminumerical Algorithms, 3rd ed.., p. 232. Boston: Addison-Wesley.

    To calculate an average and standard deviation while getting data, the standard deviation
    can be obtained from the so-called Q value with the following equation:

    sigma = sqrt(Q/n)

    This is also contained in the function calculateStdDevFromQ in this module. The average is equal to M.

    :arg newPoint: new sample to fold in.
    :arg n: 1-based count of samples including ``newPoint``.
    :arg oldM: running mean before this sample (ignored when n == 1).
    :arg oldQ: running Q before this sample (ignored when n == 1).
    :returns: updated tuple (M, Q).
    :raises MathAlgoException: for non-finite inputs.
    """
    if not validateNumericInput(newPoint): raise MathAlgoException("Provided a non-valid newPoint")
    if not validateNumericInput(n): raise MathAlgoException("Provided a non-valid n")

    if n == 1:
        # First sample: the mean is the sample itself, no spread yet.
        M = newPoint
        Q = 0.0
    else:
        if not validateNumericInput(oldM): raise MathAlgoException("Provided a non-valid oldM")
        if not validateNumericInput(oldQ): raise MathAlgoException("Provided a non-valid oldQ")
        # Welford-style update; note Q uses oldM, not the freshly updated M.
        M = oldM + (newPoint - oldM) / n
        Q = oldQ + ((n - 1) * (newPoint - oldM) * (newPoint - oldM) / n)

    return M, Q
def calculateStdDevFromQ(Q, n):
    """
    _calculateStdDevFromQ_

    If Q is the sum of the squared differences of some points to their average,
    then the standard deviation is given by:

    sigma = sqrt(Q/n)

    This function calculates that formula

    Returns 0.0 when the computed sigma is not a finite number.
    :raises MathAlgoException: for non-finite Q or n.
    """
    if not validateNumericInput(Q): raise MathAlgoException("Provided a non-valid Q")
    if not validateNumericInput(n): raise MathAlgoException("Provided a non-valid n")

    sigma = math.sqrt(Q / n)

    if not validateNumericInput(sigma): return 0.0

    return sigma
from builtins import str as newstr
import random, cherrypy
class RESTError(Exception):
    """Base class for REST errors.

    Concrete subclasses override the class attributes below; each instance
    additionally carries a unique error id plus optional context.

    .. attribute:: http_code

       Integer HTTP status code, also emitted as the X-Error-HTTP header.

    .. attribute:: app_code

       Integer application error code, emitted as X-REST-Status.

    .. attribute:: message

       Short fixed description, emitted as X-Error-Detail. Must be safe
       for a HTTP header: no newlines, nothing sensitive or unvalidated
       (no input parameters or database data); truncated to 200 characters
       on output. Normally a fixed label matching :obj:`app_code`.

    .. attribute:: info

       Free-form elaboration of :obj:`message`, emitted as X-Error-Info.
       The same header-safety and length constraints apply.

    .. attribute:: errid

       Random unique hex identifier for this error, emitted as X-Error-ID
       and written to the server logs; clients should report it so the
       specific error can be grepped out of the logs later.

    .. attribute:: errobj

       The original exception that caused this error, if any. Logged
       server-side only; never returned to the HTTP client.

    .. attribute:: trace

       Stack trace as returned by :func:`format_exc`. Logged server-side
       only; never returned to the HTTP client.
    """
    http_code = None
    app_code = None
    message = None
    info = None
    errid = None
    errobj = None
    trace = None

    def __init__(self, info=None, errobj=None, trace=None):
        # 128 random bits rendered as 32 hex digits.
        self.errid = "%032x" % random.randrange(1 << 128)
        self.info = info
        self.errobj = errobj
        self.trace = trace

    def __str__(self):
        def flat(obj):
            # Keep the log line single-line.
            return repr(obj).replace("\n", " ~~ ")
        return "%s %s [HTTP %d, APP %d, MSG %s, INFO %s, ERR %s]" \
               % (self.__class__.__name__, self.errid, self.http_code, self.app_code,
                  flat(self.message), flat(self.info), flat(self.errobj))
class NotAcceptable(RESTError):
    "Client did not specify format it accepts, or no compatible format was found."
    # HTTP 406 Not Acceptable; application code 201.
    http_code = 406
    app_code = 201
    message = "Not acceptable"
class UnsupportedMethod(RESTError):
    "Client used HTTP request method which isn't supported for any API call."
    # HTTP 405 Method Not Allowed; application code 202.
    http_code = 405
    app_code = 202
    message = "Request method not supported"
class MethodWithoutQueryString(RESTError):
    "Client provided a query string which isn't acceptable for this request method."
    # HTTP 405 Method Not Allowed; application code 203.
    http_code = 405
    app_code = 203
    message = "Query arguments not supported for this request method"
class APIMethodMismatch(RESTError):
    """Both the API and HTTP request methods are supported, but not in that
    combination."""
    # HTTP 405 Method Not Allowed; application code 204.
    http_code = 405
    app_code = 204
    message = "API not supported for this request method"
class APINotSpecified(RESTError):
    "The request URL is missing API argument."
    # HTTP 400 Bad Request; application code 205.
    http_code = 400
    app_code = 205
    message = "API not specified"
class NoSuchInstance(RESTError):
    """The request URL is missing instance argument or the specified instance
    does not exist."""
    # HTTP 404 Not Found; application code 206.
    http_code = 404
    app_code = 206
    message = "No such instance"
class APINotSupported(RESTError):
    "The request URL provides wrong API argument."
    # HTTP 404 Not Found; application code 207.
    http_code = 404
    app_code = 207
    message = "API not supported"
class DataCacheEmpty(RESTError):
    "The wmstats data cache has not be created."
    # HTTP 503 Service Unavailable; application code 208.
    http_code = 503
    app_code = 208
    message = "DataCache is Empty"
class DatabaseError(RESTError):
    """Parent class for database-related errors.

    .. attribute:: lastsql

       Tuple *(sql, binds, kwbinds)*: the last SQL statement executed and
       the bind values used with it. Sensitive parts such as passwords
       have already been censored from `sql`; for massive requests the
       bind collections can get large. Logged server-side only, never
       returned to the HTTP client.

    .. attribute:: instance

       Name of the database instance the error occurred on. Logged
       server-side only, never returned to the HTTP client.
    """
    lastsql = None
    instance = None

    def __init__(self, info=None, errobj=None, trace=None,
                 lastsql=None, instance=None):
        RESTError.__init__(self, info, errobj, trace)
        self.instance = instance
        self.lastsql = lastsql
class DatabaseUnavailable(DatabaseError):
    """The instance argument is correct, but cannot connect to the database.
    This error will only occur at initial attempt to connect to the database,
    :class:`~.DatabaseConnectionError` is raised instead if the connection
    ends prematurely after the transaction has already begun successfully."""
    # HTTP 503 Service Unavailable; application code 401.
    http_code = 503
    app_code = 401
    message = "Database unavailable"
class DatabaseConnectionError(DatabaseError):
    """Database was available when the operation started, but the connection
    was lost or otherwise failed during the application operation."""
    # HTTP 504 Gateway Timeout; application code 402.
    http_code = 504
    app_code = 402
    message = "Database connection failure"
class DatabaseExecutionError(DatabaseError):
    """Database operation failed."""
    # NOTE(review): app_code 403 and the "Execution error" message are
    # shared with ExecutionError below -- presumably deliberate, confirm.
    http_code = 500
    app_code = 403
    message = "Execution error"
class MissingParameter(RESTError):
    "Client did not supply a parameter which is required."
    # HTTP 400 Bad Request; application code 301.
    http_code = 400
    app_code = 301
    message = "Missing required parameter"
class InvalidParameter(RESTError):
    "Client supplied invalid value for a parameter."
    # HTTP 400 Bad Request; application code 302.
    http_code = 400
    app_code = 302
    message = "Invalid input parameter"
class MissingObject(RESTError):
    """An object required for the operation is missing. This might be a
    pre-requisite needed to create a reference, or attempt to delete
    an object which does not exist."""
    # HTTP 400 Bad Request; application code 303.
    http_code = 400
    app_code = 303
    message = "Required object is missing"
class TooManyObjects(RESTError):
    """Too many objects matched specified criteria. Usually this means
    more than one object was matched, deleted, or inserted, when only
    exactly one should have been subject to the operation."""
    # HTTP 400 Bad Request; application code 304.
    http_code = 400
    app_code = 304
    message = "Too many objects"
class ObjectAlreadyExists(RESTError):
    """An already existing object is on the way of the operation. This
    is usually caused by uniqueness constraint violations when creating
    new objects."""
    # HTTP 400 Bad Request; application code 305.
    http_code = 400
    app_code = 305
    message = "Object already exists"
class InvalidObject(RESTError):
    "The specified object is invalid."
    # HTTP 400 Bad Request; application code 306.
    http_code = 400
    app_code = 306
    message = "Invalid object"
class ExecutionError(RESTError):
    """Input was in principle correct but there was an error processing
    the request. This normally means either programming error, timeout, or
    an unusual and unexpected problem with the database. For security reasons
    little additional information is returned. If the problem persists, client
    should contact service operators. The returned error id can be used as a
    reference."""
    # HTTP 500 Internal Server Error; application code 403 (shared with
    # DatabaseExecutionError).
    http_code = 500
    app_code = 403
    message = "Execution error"
def report_error_header(header, val):
    """Set CherryPy response `header` to `val` when `val` is non-empty.

    Newlines are folded into "; " and values longer than 200 characters
    are cut at 197 with a trailing ellipsis, keeping them header-safe."""
    if not val:
        return
    val = val.replace("\n", "; ")
    if len(val) > 200:
        val = val[:197] + "..."
    cherrypy.response.headers[header] = val
def report_rest_error(err, trace, throw):
    """Report a REST error: generate an appropriate log message, set the
    response headers and raise an appropriate :class:`~.HTTPError`.

    Normally `throw` would be True to translate the exception `err` into
    a HTTP server error, but the function can also be called with `throw`
    set to False if the purpose is merely to log an exception message.

    :arg err: exception object.
    :arg trace: stack trace to use in case `err` doesn't have one.
    :arg throw: raise a :class:`~.HTTPError` if True."""
    if isinstance(err, DatabaseError) and err.errobj:
        # Mark the failure position inside the SQL when the DB driver
        # reports an offset, then log the full statement context.
        offset = None
        sql, binds, kwbinds = err.lastsql
        if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
            offset = err.errobj.args[0].offset
            sql = sql[:offset] + "<**>" + sql[offset:]
        cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
                     " last statement: %s; binds: %s, %s; offset: %s"
                     % (err.http_code, err.app_code, err.message,
                        getattr(err.errobj, "__module__", "__builtins__"),
                        err.errobj.__class__.__name__,
                        err.errid, err.instance, newstr(err.errobj).rstrip(),
                        sql, binds, kwbinds, offset))
        for line in err.trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, RESTError):
        if err.errobj:
            # Application-level error wrapping another exception.
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message,
                            getattr(err.errobj, "__module__", "__builtins__"),
                            err.errobj.__class__.__name__,
                            newstr(err.errobj).rstrip()))
            trace = err.trace
        else:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, cherrypy.HTTPError):
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
                     % (err.__module__, err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        # NOTE(review): X-REST-Status 200 for a HTTPError looks odd but is
        # the historical behaviour -- confirm before changing.
        cherrypy.response.headers["X-REST-Status"] = newstr(200)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
        cherrypy.response.headers["X-Error-ID"] = errid
        # err._message is a private CherryPy attribute -- presumably stable
        # for the pinned CherryPy version; verify on upgrades.
        report_error_header("X-Error-Detail", err._message)
        if throw: raise err
    else:
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
                     % (getattr(err, "__module__", "__builtins__"),
                        err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        # Fix: these two headers were set as raw ints, unlike every other
        # branch which stringifies them; HTTP header values must be strings.
        cherrypy.response.headers["X-REST-Status"] = newstr(400)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(500)
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", "Server error")
        if throw: raise cherrypy.HTTPError(500, "Server error")
from builtins import str as newstr, bytes as newbytes
from WMCore.REST.Error import *
import math
import re
import numbers
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3, PY2
def return_message(main_err, custom_err):
    """Return ``custom_err`` when the caller supplied a non-empty one,
    otherwise fall back to ``main_err``."""
    return custom_err if custom_err else main_err
def _arglist(argname, kwargs):
val = kwargs.get(argname, None)
if val == None:
return []
elif not isinstance(val, list):
return [ val ]
else:
return val
def _check_rx(argname, val, custom_err = None):
if not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
return re.compile(val)
except:
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
def _check_str(argname, val, rx, custom_err = None):
    """
    Validate ``val`` against the compiled regex ``rx`` and return it as a
    native str; raise InvalidParameter otherwise.

    This is not really checking that val is ASCII.
    2021 09: we are now using version 17.4.0 -> we do not need to convert to
    bytes here anymore, we are using a recent version of cherrypy.
    We merged the functionality of _check_str and _check_ustr into a single function

    :type val: str or bytes (only utf8 encoded string) in py3, unicode or str in py2
    :type rx: regex, compiled from native str (unicode in py3, bytes in py2)
    """
    # Normalise to the interpreter's native str before matching.
    val = decodeBytesToUnicodeConditional(val, condition=PY3)
    val = encodeUnicodeToBytesConditional(val, condition=PY2)
    # `val` should now be a "native str" (unicode in py3, bytes in py2)
    # here str has not been redefined. it is default `str` in both py2 and py3.
    if not isinstance(val, str) or not rx.match(val):
        raise InvalidParameter(return_message("Incorrect '%s' parameter %s %s" % (argname, type(val), val), custom_err))
    return val
def _check_num(argname, val, bare, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Integral) and (not isinstance(val, (newstr, newbytes)) or (bare and not val.isdigit())):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = int(val)
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _check_real(argname, val, special, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Number) and not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = float(val)
if not special and (math.isnan(n) or math.isinf(n)):
raise InvalidParameter(return_message("Parameter '%s' improper value" % argname, custom_err))
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _validate_one(argname, param, safe, checker, optional, *args):
val = param.kwargs.get(argname, None)
if optional and val == None:
safe.kwargs[argname] = None
else:
safe.kwargs[argname] = checker(argname, val, *args)
del param.kwargs[argname]
def _validate_all(argname, param, safe, checker, *args):
    """Run ``checker`` over every element of a (possibly repeated)
    argument, store the checked list in ``safe.kwargs`` and consume the
    raw value from ``param.kwargs`` if present."""
    checked = []
    for item in _arglist(argname, param.kwargs):
        checked.append(checker(argname, item, *args))
    safe.kwargs[argname] = checked
    param.kwargs.pop(argname, None)
def validate_rx(argname, param, safe, optional = False, custom_err = None):
    """Validates that an argument is a valid regexp.

    Checks that an argument named `argname` exists in `param.kwargs`,
    and is a string which compiles into a python regular expression.
    If successful, the regexp object (not the string) is copied into
    `safe.kwargs` and the string value is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.

    :arg custom_err: optional message overriding the default error text."""
    _validate_one(argname, param, safe, _check_rx, optional, custom_err)
def validate_str(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp.

    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.

    Accepts both unicode strings and utf8-encoded bytes strings as argument
    string.
    Accepts regex compiled only with "native strings", which means str in both
    py2 and py3 (unicode in py3, bytes of utf8-encoded strings in py2).

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.

    :arg custom_err: optional message overriding the default error text."""
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_ustr(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp.

    During the py2->py3 modernization, _check_str and _check_ustr were
    merged into a single function called _check_str. This function is now
    identical to validate_str, but is kept so existing client code does
    not break.

    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_num(argname, param, safe, optional = False,
                 bare = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid integer number.

    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is an int or a string convertible to a valid number. If successful
    the integer value (not the string) is copied into `safe.kwargs`
    and the original int/string value is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.

    If `bare` is True, the number is required to be a pure digit sequence if it is a string.
    Otherwise anything accepted by `int(val)` is accepted, including for
    example leading white space or sign. Note that either way arbitrarily
    large values are accepted; if you want to prevent abuse against big
    integers, use the `minval` and `maxval` thresholds described below,
    or check the length of the string against some limit first.

    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    # Delegate to the generic single-value validator with the number checker.
    _validate_one(argname, param, safe, _check_num, optional, bare, minval, maxval, custom_err)
def validate_real(argname, param, safe, optional = False,
                  special = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid real number.

    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is float number or a string convertible to a valid number. If successful
    the float value (not the string) is copied into `safe.kwargs`
    and the original float/string value is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.

    Anything accepted by `float(val)` is accepted, including for example
    leading white space, sign and exponent. However NaN and +/- Inf are
    rejected unless `special` is True.

    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    # Delegate to the generic single-value validator with the float checker.
    _validate_one(argname, param, safe, _check_real, optional, special, minval, maxval, custom_err)
def validate_rxlist(argname, param, safe, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    can be compiled into a python regexp object.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which compiles into a regular expression.
    If successful the array is copied into `safe.kwargs` and the value is
    removed from `param.kwargs`. The value always becomes an array in
    `safe.kwargs`, even if no or only one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the generic multi-value validator with the regexp checker.
    _validate_all(argname, param, safe, _check_rx, custom_err)
def validate_strlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.

    Use `validate_ustrlist` instead if the argument string might need
    to be converted from utf-8 into unicode first. Use this method only
    for inputs which are meant to be bare strings.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the generic multi-value validator with the string checker.
    _validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_ustrlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp once converted from utf-8 into unicode.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.

    Use `validate_strlist` instead if the argument strings should always
    be bare strings. This one automatically converts everything into
    unicode and expects input exclusively in utf-8, which may not be
    appropriate constraints for some uses.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # NOTE(review): validate_ustr above delegates to _check_str (the py3
    # merge), while this still uses _check_ustr -- confirm _check_ustr is
    # still defined, or whether this should also use _check_str.
    _validate_all(argname, param, safe, _check_ustr, rx, custom_err)
def validate_numlist(argname, param, safe, bare=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of integers, as checked by
    `validate_num()`.

    Checks that an argument named `argname` is either a single string/int or
    an array of strings/int, each of which validates with `validate_num` and
    `bare`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the generic multi-value validator with the number checker.
    _validate_all(argname, param, safe, _check_num, bare, minval, maxval, custom_err)
def validate_reallist(argname, param, safe, special=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of real numbers, as checked by
    `validate_real()`.

    Checks that an argument named `argname` is either a single string/float or
    an array of strings/floats, each of which validates with `validate_real` and
    `special`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Delegate to the generic multi-value validator with the float checker.
    _validate_all(argname, param, safe, _check_real, special, minval, maxval, custom_err)
def validate_no_more_input(param):
    """Verifies no more input is left in `param.args` or `param.kwargs`.

    Call this after all expected arguments have been validated and moved
    into `safe.kwargs`; anything still present was never validated and is
    therefore rejected with :class:`InvalidParameter`."""
    if param.args:
        raise InvalidParameter("Excess path arguments, not validated args='%s'" % param.args)
    if param.kwargs:
        raise InvalidParameter("Excess keyword arguments, not validated kwargs='%s'" % param.kwargs)
def validate_lengths(safe, *names):
    """Verifies that all `names` exist in `safe.kwargs`, are lists, and
    all the lists have the same length. This is a convenience function for
    checking that an API accepting multiple values receives an equal number
    of values for all of its parameters.

    :raises InvalidParameter: if any name is missing, not a list, or of a
      length different from the first name's list."""
    def _required_list(name):
        # Fetch `name` from safe.kwargs, insisting it exists and is a list.
        if name not in safe.kwargs or not isinstance(safe.kwargs[name], list):
            raise InvalidParameter("Incorrect '%s' parameter" % name)
        return safe.kwargs[name]

    refname = names[0]
    reflen = len(_required_list(refname))
    for other in names[1:]:
        otherlen = len(_required_list(other))
        if otherlen != reflen:
            raise InvalidParameter("Mismatched number of arguments: %d %s vs. %d %s"
                                   % (reflen, refname, otherlen, other))
from __future__ import print_function
import gzip
from builtins import str, bytes, object
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytes, encodeUnicodeToBytesConditional
from future.utils import viewitems
import hashlib
import json
import xml.sax.saxutils
import zlib
from traceback import format_exc
import cherrypy
from WMCore.REST.Error import RESTError, ExecutionError, report_rest_error
try:
from cherrypy.lib import httputil
except ImportError:
from cherrypy.lib import http as httputil
def vary_by(header):
    """Record `header` in the response 'Vary' header, avoiding duplicates."""
    headers = cherrypy.response.headers
    # Split the existing Vary value into clean, non-empty field names.
    existing = [field.strip() for field in headers.get('Vary', '').split(",") if field.strip()]
    if header not in existing:
        existing.append(header)
    headers['Vary'] = ", ".join(existing)
def is_iterable(obj):
    """Return True if `obj` supports iteration, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
class RESTFormat(object):
    """Base class for REST response formatters.

    Concrete subclasses (XML, JSON, raw) implement `stream_chunked()` and
    may override `chunk_args()` to supply preamble/trailer arguments."""
    def __call__(self, stream, etag):
        """Main entry point for generating output for `stream` using `etag`
        object to generate ETag header. Returns a generator function for
        producing a verbatim copy of `stream` item, including any preambles
        and trailers needed for the selected format. The intention is that
        the caller will use the iterable to generate chunked HTTP transfer
        encoding, or a simple result such as an image."""
        # Make 'stream' iterable. We convert everything to chunks here.
        # The final stream consumer will collapse small responses back
        # to a single string. Convert files to 512 kB chunks.
        if stream is None:
            stream = ['']
        elif isinstance(stream, (str, bytes)):
            stream = [stream]
        elif hasattr(stream, "read"):
            # types.FileType is not available anymore in python3,
            # using it raises pylint W1624.
            # Since cherrypy.lib.file_generator only uses the .read() attribute
            # of a file, we simply check if stream.read() is present instead.
            # https://github.com/cherrypy/cherrypy/blob/2a8aaccd649eb1011382c39f5cd93f76f980c0b1/cherrypy/lib/__init__.py#L64
            stream = cherrypy.lib.file_generator(stream, 512 * 1024)
        return self.stream_chunked(stream, etag, *self.chunk_args(stream))

    def chunk_args(self, stream):
        """Return extra arguments needed for `stream_chunked()`. The default
        return an empty tuple, so no extra arguments. Override in the derived
        class if `stream_chunked()` needs preamble or trailer arguments."""
        return tuple()
class XMLFormat(RESTFormat):
    """Format an iterable of objects into XML encoded in UTF-8.

    Generates normally first a preamble, a stream of XML-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.

    Outputs first a preamble, then XML encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.

    The ETag generation is deterministic only if iterating over input is
    deterministic. Beware in particular the key order for a dict is
    arbitrary and may differ for two semantically identical dicts.

    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.

    The output is generated as an XML document whose top-level entity name
    is defined by the label given at the formatter construction time. The
    caller must define ``cherrypy.request.rest_generate_data`` to element
    name for wrapping stream contents. Usually the top-level entity is the
    application name and the ``cherrypy.request.rest_generate_data`` is
    ``result``.

    Iterables are output as ``<array><i>ITEM</i><i>ITEM</i></array>``,
    dictionaries as ``<dict><key>KEY</key><value>VALUE</value></dict>``.
    `None` is output as empty contents, and hence there is no way to
    distinguish `None` and an empty string from each other. Scalar types
    are output as rendered by `str()`, but obviously XML encoding unsafe
    characters. This class does not support formatting arbitrary types."""
    def __init__(self, label):
        """Remember `label`, the name of the top-level XML entity."""
        self.label = label

    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into an XML fragment, returned as `str`.

        Fixes a py2->py3 porting bug: the scalar branches used to return
        UTF-8 *bytes* (via ``.encode("utf-8")``), which interpolated as
        literal ``b'...'`` text into the surrounding ``str`` templates, and
        ``xml.sax.saxutils.escape()`` raises TypeError on bytes input. All
        branches now return native strings; byte values are decoded from
        UTF-8 first. Encoding to bytes happens downstream in the output
        pipeline (ETag/compression layers).

        :raises ExecutionError: if `obj` is of an unsupported type."""
        if obj is None:
            result = ""
        elif isinstance(obj, str):
            result = xml.sax.saxutils.escape(obj)
        elif isinstance(obj, bytes):
            # Decode before escaping: escape() operates on str only.
            result = xml.sax.saxutils.escape(obj.decode("utf-8"))
        elif isinstance(obj, (int, float, bool)):
            result = xml.sax.saxutils.escape(str(obj))
        elif isinstance(obj, dict):
            result = "<dict>"
            for key, val in viewitems(obj):
                # NOTE(review): keys are assumed to be native strings, as in
                # the original implementation -- confirm against callers.
                result += "<key>%s</key><value>%s</value>" % \
                          (xml.sax.saxutils.escape(key), XMLFormat.format_obj(val))
            result += "</dict>"
        elif is_iterable(obj):
            result = "<array>"
            for val in obj:
                result += "<i>%s</i>" % XMLFormat.format_obj(val)
            result += "</array>"
        else:
            # Fix: 'type(obj).__class__.__name__' always logged the
            # metaclass name ('type'); log the object's real class instead.
            cherrypy.log("cannot represent object of type %s in xml (%s)"
                         % (type(obj).__name__, repr(obj)))
            raise ExecutionError("cannot represent object in xml")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output.

        Emits `preamble`, one XML fragment per object in `stream`, then
        `trailer`, folding everything into `etag`. Errors from `stream` are
        reported via `report_rest_error` and swallowed, but the trailer is
        still emitted so the document stays well-formed; on GeneratorExit
        (client disconnect) the ETag is invalidated and the trailer
        suppressed."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = XMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # Client went away; nothing further may be yielded.
                etag.invalidate()
                trailer = None
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return preamble and trailer needed to wrap `stream` as XML reply."""
        preamble = "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
        preamble += "<%s>" % self.label
        if cherrypy.request.rest_generate_preamble:
            # Optional <desc> element (e.g. column labels) before the data.
            desc = self.format_obj(cherrypy.request.rest_generate_preamble)
            preamble += "<desc>%s</desc>" % desc
        preamble += "<%s>" % cherrypy.request.rest_generate_data
        trailer = "</%s></%s>" % (cherrypy.request.rest_generate_data, self.label)
        return preamble, trailer
class JSONFormat(RESTFormat):
    """Format an iterable of objects into JSON.

    Generates normally first a preamble, a stream of JSON-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.

    Outputs first a preamble, then JSON encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.

    The ETag generation is deterministic only if `json.dumps()` output is
    deterministic for the input. Beware in particular the key order for a
    dict is arbitrary and may differ for two semantically identical dicts.

    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.

    The output is always generated as a JSON dictionary. The caller must
    define ``cherrypy.request.rest_generate_data`` as the key for actual
    contents, usually something like "result". The `stream` value will be
    generated as an array value for that key.

    If ``cherrypy.request.rest_generate_preamble`` is a non-empty list, it
    is output as the ``desc`` key value in the preamble before outputting
    the `stream` contents. Otherwise the output consists solely of `stream`.
    A common use of ``rest_generate_preamble`` is list of column labels
    with `stream` an iterable of lists of column values.

    The output is guaranteed to contain one line of preamble which starts a
    dictionary and an array ("``{key: [``"), one line of JSON rendering of
    each object in `stream`, with the first line starting with exactly one
    space and second and subsequent lines starting with a comma, and one
    final trailer line consisting of "``]}``". Each line is generated as a
    HTTP transfer chunk. This format is fixed so readers can be constructed
    to read and parse the stream incrementally one line at a time,
    facilitating maximum throughput processing of the response."""
    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output.

        Yields `preamble`, one "<sep><json>\\n" line per object in `stream`
        (separator is a space for the first object, a comma afterwards),
        then `trailer`, folding everything into `etag`. See the class
        docstring for the error-handling contract."""
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            # Track the current object so serialization failures can be
            # reported with the offending value.
            obj = None
            try:
                for obj in stream:
                    chunk = comma + json.dumps(obj) + "\n"
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except cherrypy.HTTPError:
                # Real HTTP errors propagate unchanged.
                raise
            except GeneratorExit:
                # Client disconnected: suppress trailer, invalidate ETag.
                etag.invalidate()
                trailer = None
                raise
            except Exception as exp:
                print("ERROR, json.dumps failed to serialize %s, type %s\nException: %s" \
                      % (obj, type(obj), str(exp)))
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
                cherrypy.response.headers["X-REST-Status"] = 100
        except cherrypy.HTTPError:
            raise
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return preamble and trailer needed to wrap `stream` as JSON reply."""
        comma = ""
        preamble = "{"
        trailer = "]}\n"
        if cherrypy.request.rest_generate_preamble:
            # Optional 'desc' entry (e.g. column labels) before the data key.
            desc = json.dumps(cherrypy.request.rest_generate_preamble)
            preamble += '"desc": %s' % desc
            comma = ", "
        preamble += '%s"%s": [\n' % (comma, cherrypy.request.rest_generate_data)
        return preamble, trailer
class PrettyJSONFormat(JSONFormat):
    """Format used for human consumption (web browser).

    Same contract as `JSONFormat`, but each object is rendered with
    ``json.dumps(obj, indent=2)`` and without a per-chunk trailing newline.
    The preamble/trailer come from the inherited `chunk_args()`."""
    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            try:
                for obj in stream:
                    # indent=2 gives a human-readable multi-line rendering.
                    chunk = comma + json.dumps(obj, indent=2)
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except GeneratorExit:
                # Client disconnected: suppress trailer, invalidate ETag.
                etag.invalidate()
                trailer = None
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
                cherrypy.response.headers["X-REST-Status"] = 100
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
class PrettyJSONHTMLFormat(PrettyJSONFormat):
    """Format used for humans (web browser); wraps the data in HTML tags."""
    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into an HTML fragment.

        Strings containing newlines are wrapped in ``<pre>``; dicts become
        ``<ul>`` lists of key/value items; other iterables become
        collapsible ``<details>`` lists. String values are quoted with
        `xml.sax.saxutils.quoteattr`.

        :raises ExecutionError: if `obj` is of an unsupported type."""
        if obj is None:
            result = ""
        elif isinstance(obj, str):
            obj = xml.sax.saxutils.quoteattr(obj)
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, bytes):
            # Byte strings are assumed to contain utf-8 encoded text.
            obj = xml.sax.saxutils.quoteattr(str(obj, "utf-8"))
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, (int, float, bool)):
            result = "%s" % obj
        elif isinstance(obj, dict):
            result = "<ul>"
            for k, v in viewitems(obj):
                # NOTE(review): dict keys are interpolated without HTML
                # escaping; safe only for trusted keys -- confirm callers
                # never feed user-controlled keys through this formatter.
                result += "<li><b>%s</b>: %s</li>" % (k, PrettyJSONHTMLFormat.format_obj(v))
            result += "</ul>"
        elif is_iterable(obj):
            empty = True
            result = "<details open><ul>"
            for v in obj:
                empty = False
                result += "<li>%s</li>" % PrettyJSONHTMLFormat.format_obj(v)
            result += "</ul></details>"
            if empty:
                # Collapse a completely empty iterable to no markup at all.
                result = ""
        else:
            # Fix: 'type(obj).__class__.__name__' always logged the metaclass
            # name ('type'); report the real class, and say "html" (this is
            # the HTML formatter), not "xml".
            cherrypy.log("cannot represent object of type %s in html (%s)"
                         % (type(obj).__name__, repr(obj)))
            raise ExecutionError("cannot represent object in html")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output.

        Emits `preamble`, one HTML fragment per object, then `trailer`,
        folding everything into `etag`; the trailer is still emitted on
        stream errors so the page stays well-formed, and the ETag is
        invalidated on any failure or client disconnect."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = PrettyJSONHTMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                etag.invalidate()
                trailer = None
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return preamble and trailer wrapping `stream` in an HTML page."""
        preamble = "<html><body>"
        trailer = "</body></html>"
        return preamble, trailer
class RawFormat(RESTFormat):
    """Format an iterable of objects as raw data.

    Generates raw data completely unmodified, for example image data or
    streaming arbitrary external data files including even plain text.
    Computes an ETag on the output in the process. The result is always
    chunked, even simple strings on input. Usually small enough responses
    will automatically be converted back to a single string response post
    compression and ETag processing.

    Any exceptions raised by input stream are reported to `report_rest_error`
    and swallowed, as this is normally used to generate output for CherryPy
    responses, which cannot handle exceptions reasonably after the output
    generation begins; later processing may reconvert those back to exceptions
    however (cf. stream_maybe_etag()). A X-REST-Status trailer header is added
    if (and only if) an exception occurs; the client must inspect that to find
    out if it got the complete output. There is normally 'X-REST-Status: 100'
    in normal response headers, and it remains valid in case of success.
    No ETag header is generated in case of an exception."""
    def stream_chunked(self, stream, etag):
        """Generator for actually producing the output.

        Chunks are passed through unmodified while `etag` accumulates
        them; no preamble or trailer is used for raw output."""
        try:
            for chunk in stream:
                etag.update(chunk)
                yield chunk
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
        except BaseException:
            # E.g. GeneratorExit on client disconnect: invalidate the ETag
            # but let the exception propagate to the caller.
            etag.invalidate()
            raise
class DigestETag(object):
    """Compute hash digest over contents for ETag header."""
    # Default digest algorithm name; concrete subclasses override this.
    algorithm = None

    def __init__(self, algorithm=None):
        """Prepare the ETag computer using `algorithm`, falling back to the
        class-level default when none is given."""
        self.digest = hashlib.new(algorithm or self.algorithm)

    def update(self, val):
        """Fold response data `val` into the running digest, unless the
        calculator has been invalidated."""
        if self.digest is not None:
            self.digest.update(encodeUnicodeToBytes(val))

    def value(self):
        """Return the double-quoted ETag header value for current input,
        or None if the calculator has been invalidated."""
        if self.digest is None:
            return None
        return '"%s"' % self.digest.hexdigest()

    def invalidate(self):
        """Invalidate the ETag calculator so value() will return None."""
        self.digest = None
class MD5ETag(DigestETag):
    """Compute MD5 hash over contents for ETag header."""
    # Algorithm name handed to hashlib.new() by the DigestETag constructor.
    algorithm = 'md5'
class SHA1ETag(DigestETag):
    """Compute SHA1 hash over contents for ETag header."""
    # Algorithm name handed to hashlib.new() by the DigestETag constructor.
    algorithm = 'sha1'
def _stream_compress_identity(reply, *args):
    """Streaming compressor which returns original data unchanged.

    Extra positional arguments (compression level, maximum chunk size) are
    accepted for signature compatibility with the real compressors and
    ignored."""
    return reply
def _stream_compress_deflate(reply, compress_level, max_chunk):
    """Streaming compressor for the 'deflate' method. Generates output that
    is guaranteed to expand at the exact same chunk boundaries as original
    reply stream."""
    # Raw deflate stream: negative window size means no zlib header/trailer.
    compressor = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS,
                                  zlib.DEF_MEM_LEVEL, 0)
    # Accumulate whole chunks from the original reply; once enough data is
    # buffered, compress it and fully flush the engine so decompression can
    # recover the original chunk boundaries.
    buffered = []
    buffered_size = 0
    for chunk in reply:
        buffered.append(chunk)
        buffered_size += len(chunk)
        if buffered_size < max_chunk:
            continue
        data = encodeUnicodeToBytes("".join(buffered))
        buffered = []
        buffered_size = 0
        yield compressor.compress(data) + compressor.flush(zlib.Z_FULL_FLUSH)
    # Crank the compressor one more time for any remaining output.
    if buffered_size:
        data = encodeUnicodeToBytes("".join(buffered))
        yield compressor.compress(data) + compressor.flush(zlib.Z_FINISH)
def _stream_compress_gzip(reply, compress_level, *args):
    """Streaming compressor for the 'gzip' method. Generates output that
    is guaranteed to expand at the exact same chunk boundaries as original
    reply stream."""
    # Materialize the whole reply, then emit a single gzip member; an empty
    # reply produces no output at all.
    body = list(reply)
    if body:
        yield gzip.compress(encodeUnicodeToBytes("".join(body)), compress_level)
#: Map of supported HTTP content-encoding names to their streaming
#: compressor implementations; consulted by `stream_compress()` below.
_stream_compressor = {
    'identity': _stream_compress_identity,
    'deflate': _stream_compress_deflate,
    'gzip': _stream_compress_gzip
}
def stream_compress(reply, available, compress_level, max_chunk):
    """If compression has been requested via Accept-Encoding request header,
    and is granted for this response via `available` compression methods,
    convert the streaming `reply` into another streaming response which is
    compressed at the exact chunk boundaries of the original response,
    except that individual chunks may be coalesced up to `max_chunk` size.
    The `compress_level` tells how hard to compress, zero disables the
    compression entirely."""
    # _stream_compressor is only read here, so no 'global' declaration is
    # required. Pick the first acceptable encoding we can actually provide.
    for enc in cherrypy.request.headers.elements('Accept-Encoding'):
        method = enc.value
        if method not in available:
            continue
        if method in _stream_compressor and compress_level > 0:
            # Tell caches the reply varies with the Accept-Encoding header.
            vary_by('Accept-Encoding')
            # The compressed length is unknown up front; drop any preset one.
            if 'Content-Length' in cherrypy.response.headers:
                del cherrypy.response.headers['Content-Length']
            cherrypy.response.headers['Content-Encoding'] = method
            return _stream_compressor[method](reply, compress_level, max_chunk)
    return reply
def _etag_match(status, etagval, match, nomatch):
    """Match ETag value against any If-Match / If-None-Match headers."""
    # Execute conditions only for GET/HEAD; for PUT etc. this belongs in
    # request pre-condition handling, not in the streaming-out path here.
    if cherrypy.request.method not in ('GET', 'HEAD'):
        return
    # Execute conditions only for status 2xx.
    status, _reason, _message = httputil.valid_status(status)
    if not 200 <= status <= 299:
        return
    if match and ("*" in match or etagval in match):
        raise cherrypy.HTTPError(412, "Precondition on ETag %s failed" % etagval)
    if nomatch and ("*" in nomatch or etagval in nomatch):
        raise cherrypy.HTTPRedirect([], 304)
def _etag_tail(head, tail, etag):
    """Generator which first returns anything in `head`, then `tail`.
    Sets ETag header at the end to value of `etag` if it's defined and
    yields a value."""
    for part in head:
        yield encodeUnicodeToBytes(part)
    for part in tail:
        yield encodeUnicodeToBytes(part)
    # Only after the full body is out do we know the final digest; emit it
    # as a trailer-style header if the calculator is still valid.
    etagval = etag.value() if etag else None
    if etagval:
        cherrypy.response.headers["ETag"] = etagval
def stream_maybe_etag(size_limit, etag, reply):
    """Maybe generate ETag header for the response, and handle If-Match
    and If-None-Match request headers. Consumes the reply until at most
    `size_limit` bytes. If the response fits into that size, adds the
    ETag header and matches it against any If-Match / If-None-Match
    request headers and replies appropriately.

    If the response is fully buffered, and the `reply` generator actually
    results in an error and sets X-Error-HTTP / X-Error-Detail headers,
    converts that error back into a real HTTP error response. Otherwise
    responds with the fully buffered body directly, without generator
    and chunking. In other words, responses smaller than `size_limit`
    are always fully buffered and replied immediately without chunking.
    If the response is not fully buffered, it's guaranteed to be output
    at original chunk boundaries.

    Note that if this function is fed the output from `stream_compress()`
    as it normally would be, the `size_limit` constrains the compressed
    size, and chunk boundaries correspond to compressed chunks."""
    req = cherrypy.request
    res = cherrypy.response
    # Collect any conditional request headers for the match checks below.
    match = [str(x) for x in (req.headers.elements('If-Match') or [])]
    nomatch = [str(x) for x in (req.headers.elements('If-None-Match') or [])]
    # If ETag is already set, match conditions and output without buffering.
    etagval = res.headers.get('ETag', None)
    if etagval:
        _etag_match(res.status or 200, etagval, match, nomatch)
        res.headers['Trailer'] = 'X-REST-Status'
        return _etag_tail([], reply, None)
    # Buffer up to size_limit bytes internally. This internally builds up the
    # ETag value inside 'etag'. In case of exceptions the ETag invalidates.
    # If we exceed the limit, fall back to streaming without checking ETag
    # against If-Match/If-None-Match. We'll still set the ETag in the trailer
    # headers, so clients which understand trailers will get the value; most
    # clients including browsers will ignore them.
    size = 0
    result = []
    for chunk in reply:
        result.append(chunk)
        size += len(chunk)
        if size > size_limit:
            res.headers['Trailer'] = 'X-REST-Status'
            return _etag_tail(result, reply, etag)
    # We've buffered the entire response, but it may be an error reply. The
    # generator code does not know if it's allowed to raise exceptions, so
    # it swallows all errors and converts them into X-* headers. We recover
    # the original HTTP response code and message from X-Error-{HTTP,Detail}
    # headers, if any are present.
    err = res.headers.get('X-Error-HTTP', None)
    if err:
        message = res.headers.get('X-Error-Detail', 'Original error lost')
        raise cherrypy.HTTPError(int(err), message)
    # OK, we buffered the entire reply and it's ok. Check ETag match criteria.
    # The original stream generator must guarantee that if it fails it resets
    # the 'etag' value, even if the error handlers above didn't run.
    etagval = etag.value()
    if etagval:
        res.headers['ETag'] = etagval
        _etag_match(res.status or 200, etagval, match, nomatch)
    # OK, respond with the buffered reply as a plain string.
    res.headers['Content-Length'] = size
    # TODO investigate why `result` is a list of bytes strings in py3
    # The current solution seems to work in both py2 and py3
    resp = b"" if PY3 else ""
    for item in result:
        resp += encodeUnicodeToBytesConditional(item, condition=PY3)
    assert len(resp) == size
    return resp
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method = 'HNLogin',
                       login='testuser', name='Test User',
                       dn="/test/dn", roles=None, format="list"):
    """Create fake authentication and authorisation headers compatible
    with the CMSWEB front-ends. Assumes you have the HMAC signing key
    the back-end will use to validate the headers.

    :arg str hmac_key: binary key data for signing headers.
    :arg str method: authentication method, one of X509Cert, X509Proxy,
      HNLogin, HostIP, AUCookie or None.
    :arg str login: account login name.
    :arg str name: account user name.
    :arg str dn: account X509 subject.
    :arg dict roles: role dictionary, each role with 'site' and 'group'
      lists; defaults to no roles (None, formerly a mutable ``{}`` default).
    :arg str format: "list" to return header (name, value) tuples,
      anything else to return the headers dict itself.
    :returns: list of header name, value tuples to add to a HTTP request."""
    # Fix: the old signature used the mutable default ``roles={}``; use the
    # None sentinel instead so callers can never share/alter the default.
    roles = roles or {}
    headers = { 'cms-auth-status': 'OK', 'cms-authn-method': method }
    if login:
        headers['cms-authn-login'] = login
    if name:
        headers['cms-authn-name'] = name
    if dn:
        headers['cms-authn-dn'] = dn
    # Fix: the loop variable used to be 'name', silently shadowing the user
    # name parameter above; use distinct names for clarity.
    for role, info in viewitems(roles):
        hname = 'cms-authz-' + authz_canonical(role)
        values = []
        for attr in 'site', 'group':
            if attr in info:
                values.extend(["%s:%s" % (attr, authz_canonical(v)) for v in info[attr]])
        headers[hname] = " ".join(values)
    # Sign everything except the status header: lengths go into the prefix,
    # names and values into the suffix, exactly as the front-end expects.
    prefix = suffix = ""
    for hk in sorted(headers):
        if hk != 'cms-auth-status':
            prefix += "h%xv%x" % (len(hk), len(headers[hk]))
            suffix += "%s%s" % (hk, headers[hk])
    msg = prefix + "#" + suffix
    if PY3:
        hmac_key = encodeUnicodeToBytes(hmac_key)
        msg = encodeUnicodeToBytes(msg)
    cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
    headers['cms-authn-hmac'] = cksum
    if format == "list":
        return listitems(headers)
    return headers
def fake_authz_key_file(delete=True):
    """Create temporary file for fake authorisation hmac signing key.

    :arg bool delete: forwarded to :class:`~.NamedTemporaryFile`; when True
       the file is removed automatically on close.
    :returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
       attribute contains the 20-byte binary HMAC signing key."""
    t = NamedTemporaryFile(delete=delete)
    # os.urandom provides the same quality random bytes as /dev/urandom but is
    # portable and does not leave an extra file handle open.
    t.data = os.urandom(20)
    t.write(t.data)
    t.seek(0)
    return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
    """Helper function to set up a :class:`~.RESTMain` server from given
    module and class. Creates a fake server configuration and instantiates
    the server application from it.

    :arg str module_name: module from which to import test class.
    :arg str class_name: name of the server test class.
    :arg str app_name: optional test application name, 'test' by default.
    :arg authz_key_file: optional :class:`~.NamedTemporaryFile`-like object
       holding the HMAC signing key; a fresh one is created when not given.
    :arg int port: TCP port the CherryPy server will listen on.
    :returns: tuple with the server object and authz hmac signing key."""
    if authz_key_file:
        test_authz_key = authz_key_file
    else:
        test_authz_key = fake_authz_key_file()
    # Build a minimal fake configuration: 'main' section, the application
    # section and a single 'top' view pointing at the requested class.
    cfg = Configuration()
    main = cfg.section_('main')
    main.application = app_name or 'test'
    main.silent = True
    main.index = 'top'
    main.authz_defaults = { 'role': None, 'group': None, 'site': None }
    main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
    app = cfg.section_(app_name or 'test')
    app.admin = 'dada@example.org'
    app.description = app.title = 'Test'
    views = cfg.section_('views')
    top = views.section_('top')
    top.object = module_name + "." + class_name
    # Instantiate and wire up the REST server from the fake configuration.
    server = RESTMain(cfg, os.getcwd())
    server.validate_config()
    server.setup_server()
    server.install_application()
    # Global CherryPy settings suitable for a local test-suite run.
    cherrypy.config.update({'server.socket_port': port})
    cherrypy.config.update({'server.socket_host': '127.0.0.1'})
    cherrypy.config.update({'request.show_tracebacks': True})
    cherrypy.config.update({'environment': 'test_suite'})
    for app in viewvalues(cherrypy.tree.apps):
        if '/' in app.config:
            app.config["/"]["request.show_tracebacks"] = True
    return server, test_authz_key | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/REST/Test.py | 0.631935 | 0.193147 | Test.py | pypi
from __future__ import division, print_function, absolute_import
from future import standard_library
standard_library.install_aliases()
# system modules
import json
import logging
import math
import re
import time
from urllib.parse import quote, unquote
# WMCore modules
from Utils.IteratorTools import grouper
from Utils.CertTools import ckey, cert
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
# DBS agregators
from dbs.apis.dbsClient import aggRuns, aggFileLumis
# static variables
# Precompiled patterns matching chained-request dictionary keys such as
# "Step1" / "Task2". NOTE(review): not referenced within this visible chunk;
# presumably used by sibling MicroService modules — confirm before removing.
STEP_PAT = re.compile(r'Step[0-9]')
TASK_PAT = re.compile(r'Task[0-9]')
def hasHTTPFailed(row):
    """
    Evaluates whether the HTTP request through PyCurl failed or not.

    :param row: dictionary data returned from pycurl_manager module
    :return: a boolean confirming failure or not
    """
    # A missing payload or a non-200 status code both count as failure;
    # an absent status code is assumed to be a successful 200.
    return 'data' not in row or int(row.get('code', 200)) != 200
def getMSLogger(verbose, logger=None):
    """
    _getMSLogger_

    Return a logger object using the standard WMCore formatter

    :param verbose: boolean setting debug or not
    :param logger: optional pre-built logger, returned untouched when given
    :return: a logger object
    """
    if logger:
        return logger
    level = logging.DEBUG if verbose else logging.INFO
    rootLogger = logging.getLogger()
    logging.basicConfig(format="%(asctime)s:%(levelname)s:%(module)s: %(message)s",
                        level=level)
    return rootLogger
def isRelVal(reqDict):
    """
    Helper function to evaluate whether the workflow is RelVal or not.

    :param reqDict: dictionary with the workflow description
    :return: True if it's a RelVal workflow, otherwise False
    """
    subType = reqDict.get("SubRequestType", "")
    return subType in ('RelVal', 'HIRelVal')
def dbsInfo(datasets, dbsUrl):
    """
    Provides DBS info about dataset blocks.

    :param datasets: list of dataset names
    :param dbsUrl: string with the DBS URL
    :return: tuple of three dictionaries, all keyed by dataset name:
        list of blocks, total dataset size, and a flat map of
        block name to block size. Datasets whose DBS request failed
        are left out of the first two dictionaries.
    """
    datasetBlocks = {}
    datasetSizes = {}
    datasetTransfers = {}
    if not datasets:
        return datasetBlocks, datasetSizes, datasetTransfers
    urls = ['%s/blocks?detail=True&dataset=%s' % (dbsUrl, d) for d in datasets]
    logging.info("Executing %d requests against DBS 'blocks' API, with details", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # the dataset name is the value of the last URL parameter
        dataset = row['url'].split('=')[-1]
        if hasHTTPFailed(row):
            print("FAILURE: dbsInfo for %s. Error: %s %s" % (dataset, row.get('code'), row.get('error')))
            continue
        rows = json.loads(row['data'])
        blocks = []
        size = 0
        datasetTransfers.setdefault(dataset, {})  # flat dict in the format of blockName: blockSize
        for item in rows:
            blocks.append(item['block_name'])
            size += item['block_size']
            datasetTransfers[dataset].update({item['block_name']: item['block_size']})
        datasetBlocks[dataset] = blocks
        datasetSizes[dataset] = size
    return datasetBlocks, datasetSizes, datasetTransfers
def getPileupDocs(mspileupUrl, queryDict):
    """
    Fetch documents from MSPileup according to the query passed in.

    :param mspileupUrl: string with the MSPileup url
    :param queryDict: dictionary with the MongoDB query to run
    :return: returns a list with all the pileup objects, or raises
        an exception in case of failure
    """
    mgr = RequestHandler()
    headers = {'Content-Type': 'application/json'}
    data = mgr.getdata(mspileupUrl, queryDict, headers, verb='POST',
                       ckey=ckey(), cert=cert(), encode=True, decode=True)
    if data and data.get("result", []):
        # NOTE(review): only the first entry of 'result' is inspected for an
        # error marker — presumably the server reports failures in that slot;
        # confirm against the MSPileup API.
        if "error" in data["result"][0]:
            msg = f"Failed to retrieve MSPileup documents with query: {queryDict}"
            msg += f" and error message: {data}"
            raise RuntimeError(msg)
    # raises KeyError if the response carries no 'result' key at all
    return data["result"]
def getPileupDatasetSizes(datasets, phedexUrl):
    """
    Given a list of datasets, find all their blocks with replicas
    available, i.e., blocks that have valid files to be processed,
    and calculate the total dataset size

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :return: a dictionary of datasets and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    sizeByDset = {}
    if not datasets:
        return sizeByDset
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # the dataset name is the value of the last URL parameter
        dataset = row['url'].split('=')[-1]
        # NOTE(review): failure is detected by a missing payload only, not via
        # hasHTTPFailed as in sibling functions — a non-200 response that does
        # carry a body would still be parsed here.
        if row['data'] is None:
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
                                                                                     row.get('code'),
                                                                                     row.get('error')))
            sizeByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        sizeByDset.setdefault(dataset, 0)
        try:
            for item in rows['phedex']['block']:
                sizeByDset[dataset] += item['bytes']
        except Exception as exc:
            # unexpected payload structure: flag the dataset as failed
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
            sizeByDset[dataset] = None
    return sizeByDset
def getBlockReplicasAndSize(datasets, phedexUrl, group=None):
    """
    Given a list of datasets, find all their blocks with replicas
    available (thus blocks with at least 1 valid file), completed
    and subscribed.
    If PhEDEx group is provided, make sure it's subscribed under that
    same group.

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :param group: optional PhEDEx group name
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    dsetBlockSize = {}
    if not datasets:
        return dsetBlockSize
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # the dataset name is the value of the last URL parameter
        dataset = row['url'].split('=')[-1]
        if row['data'] is None:
            print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s %s" % (dataset,
                                                                                       row.get('code'),
                                                                                       row.get('error')))
            dsetBlockSize.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        dsetBlockSize.setdefault(dataset, {})
        try:
            for item in rows['phedex']['block']:
                block = {item['name']: {'blockSize': item['bytes'], 'locations': []}}
                # keep only replicas that are both complete and subscribed
                # (and, when requested, owned by the given PhEDEx group)
                for repli in item['replica']:
                    if repli['complete'] == 'y' and repli['subscribed'] == 'y':
                        if not group:
                            block[item['name']]['locations'].append(repli['node'])
                        elif repli['group'] == group:
                            block[item['name']]['locations'].append(repli['node'])
                dsetBlockSize[dataset].update(block)
        except Exception as exc:
            # unexpected payload structure: flag the dataset as failed
            print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s" % (dataset, str(exc)))
            dsetBlockSize[dataset] = None
    return dsetBlockSize
def getPileupSubscriptions(datasets, phedexUrl, group=None, percentMin=99):
    """
    Provided a list of datasets, find dataset level subscriptions where it's
    as complete as `percentMin`.

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :param group: optional string with the PhEDEx group
    :param percentMin: only return subscriptions that are this complete
    :return: a dictionary of datasets and a list of their location.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    locationByDset = {}
    if not datasets:
        return locationByDset
    # build the base URL first, the group filter is optional
    if group:
        url = "%s/subscriptions?group=%s" % (phedexUrl, group)
        url += "&percent_min=%s&dataset=%s"
    else:
        url = "%s/subscriptions?" % phedexUrl
        url += "percent_min=%s&dataset=%s"
    urls = [url % (percentMin, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'subscriptions' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # the dataset name is the value of the last URL parameter
        dataset = row['url'].rsplit('=')[-1]
        if row['data'] is None:
            print("Failure in getPileupSubscriptions for dataset %s. Error: %s %s" % (dataset,
                                                                                      row.get('code'),
                                                                                      row.get('error')))
            locationByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        locationByDset.setdefault(dataset, [])
        try:
            for item in rows['phedex']['dataset']:
                for subs in item['subscription']:
                    locationByDset[dataset].append(subs['node'])
        except Exception as exc:
            # unexpected payload structure: flag the dataset as failed
            print("Failure in getPileupSubscriptions for dataset %s. Error: %s" % (dataset, str(exc)))
            locationByDset[dataset] = None
    return locationByDset
def getBlocksByDsetAndRun(datasetName, runList, dbsUrl):
    """
    Given a dataset name and a list of runs, find all the blocks

    :param datasetName: string with the dataset name
    :param runList: list (or set) of run numbers
    :param dbsUrl: string with the DBS URL
    :return: flat list of blocks
    """
    blocks = set()
    if isinstance(runList, set):
        runList = list(runList)
    urls = []
    # query DBS in slices of 50 runs to keep each URL reasonably short
    for runSlice in grouper(runList, 50):
        urls.append('%s/blocks?run_num=%s&dataset=%s' % (dbsUrl, str(runSlice).replace(" ", ""), datasetName))
    logging.info("Executing %d requests against DBS 'blocks' API, with run_num list", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        dataset = row['url'].rsplit('=')[-1]
        if hasHTTPFailed(row):
            msg = "Failure in getBlocksByDsetAndRun for %s. Error: %s %s" % (dataset,
                                                                             row.get('code'),
                                                                             row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        for item in rows:
            blocks.add(item['block_name'])
    return list(blocks)
def getFileLumisInBlock(blocks, dbsUrl, validFileOnly=1):
    """
    Given a list of blocks, find their file run lumi information
    in DBS for up to 10 blocks concurrently

    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    :param validFileOnly: integer flag for valid files only or not
    :return: a dict of blocks with list of file/run/lumi info
    """
    runLumisByBlock = {}
    urls = ['%s/filelumis?validFileOnly=%d&block_name=%s' % (dbsUrl, validFileOnly, quote(b)) for b in blocks]
    # limit it to 10 concurrent calls not to overload DBS
    logging.info("Executing %d requests against DBS 'filelumis' API, concurrency limited to 10", len(urls))
    data = multi_getdata(urls, ckey(), cert(), num_conn=10)
    for row in data:
        # block name is the (url-encoded) value of the last URL parameter
        blockName = unquote(row['url'].rsplit('=')[-1])
        if hasHTTPFailed(row):
            msg = "Failure in getFileLumisInBlock for block %s. Error: %s %s" % (blockName,
                                                                                 row.get('code'),
                                                                                 row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        rows = aggFileLumis(rows)  # adjust to DBS Go server output
        runLumisByBlock.setdefault(blockName, [])
        for item in rows:
            runLumisByBlock[blockName].append(item)
    return runLumisByBlock
def findBlockParents(blocks, dbsUrl):
    """
    Helper function to find block parents given a list of block names.

    :param blocks: list of (child) block names
    :param dbsUrl: string with the DBS URL
    :return: a dictionary in the format of:
        {"child dataset name": {"child block": ["parent blocks"],
                                "child block": ["parent blocks"], ...}}
        (the inner values are actually sets of parent block names)
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    parentsByBlock = {}
    urls = ['%s/blockparents?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
    logging.info("Executing %d requests against DBS 'blockparents' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        blockName = unquote(row['url'].rsplit('=')[-1])
        # the dataset name is everything before the '#' block separator
        dataset = blockName.split("#")[0]
        if hasHTTPFailed(row):
            print("Failure in findBlockParents for block %s. Error: %s %s" % (blockName,
                                                                              row.get('code'),
                                                                              row.get('error')))
            parentsByBlock.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        try:
            if dataset in parentsByBlock and parentsByBlock[dataset] is None:
                # then one of the block calls has failed, keep it failed!
                continue
            parentsByBlock.setdefault(dataset, {})
            for item in rows:
                parentsByBlock[dataset].setdefault(item['this_block_name'], set())
                parentsByBlock[dataset][item['this_block_name']].add(item['parent_block_name'])
        except Exception as exc:
            print("Failure in findBlockParents for block %s. Error: %s" % (blockName, str(exc)))
            parentsByBlock[dataset] = None
    return parentsByBlock
def getRunsInBlock(blocks, dbsUrl):
    """
    Provided a list of block names, find their run numbers

    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    :return: a dictionary of block names and a list of run numbers
    """
    runsByBlock = {}
    urls = ['%s/runs?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
    logging.info("Executing %d requests against DBS 'runs' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # block name is the (url-encoded) value of the last URL parameter
        blockName = unquote(row['url'].rsplit('=')[-1])
        if hasHTTPFailed(row):
            msg = "Failure in getRunsInBlock for block %s. Error: %s %s" % (blockName,
                                                                            row.get('code'),
                                                                            row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        rows = aggRuns(rows)  # adjust to DBS Go server output
        runsByBlock[blockName] = rows[0]['run_num']
    return runsByBlock
def getWorkflow(requestName, reqMgrUrl):
    """
    Get list of workflow info from ReqMgr2 data-service for given request name.

    :param requestName: string with the workflow (request) name
    :param reqMgrUrl: string with the ReqMgr2 URL
    :return: the 'result' list from the ReqMgr2 JSON response (empty list if absent)
    """
    headers = {'Accept': 'application/json'}
    params = {}
    url = '%s/data/request/%s' % (reqMgrUrl, requestName)
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    data = json.loads(res)
    return data.get('result', [])
def getDetoxQuota(url):
    """
    Fetch the Detox quota information from the given url.

    NOTE(review): the previous docstring was copy-pasted from getWorkflow and
    wrong; this function simply GETs the url and splits the payload on newlines.

    :param url: string with the full Detox URL to fetch
    :return: list of strings, one per line of the response
    """
    headers = {}
    params = {}
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    res = res.split('\n')
    return res
def eventsLumisInfo(inputs, dbsUrl, validFileOnly=0, sumOverLumi=0):
    """
    Get information about events and lumis for given set of inputs:
    blocks or datasets.

    :param inputs: list of dataset or block names; the first entry decides
        which DBS parameter (dataset vs block_name) is used for all of them
    :param dbsUrl: string with the DBS URL
    :param validFileOnly: integer flag passed to DBS, valid files only or not
    :param sumOverLumi: integer flag passed to DBS, sum over lumis or not
    :return: dictionary keyed by input name, with its DBS filesummaries record
    """
    what = 'dataset'
    eventsLumis = {}
    if not inputs:
        return eventsLumis
    if '#' in inputs[0]:  # inputs are list of blocks
        what = 'block_name'
    urls = ['%s/filesummaries?validFileOnly=%s&sumOverLumi=%s&%s=%s'
            % (dbsUrl, validFileOnly, sumOverLumi, what, quote(i)) for i in inputs]
    results = multi_getdata(urls, ckey(), cert())
    for row in results:
        # BUGFIX: the original code rebound `data` — the very collection being
        # iterated — inside the loop; use dedicated names instead.
        inputName = unquote(row['url'].split('=')[-1])
        if hasHTTPFailed(row):
            print("FAILURE: eventsLumisInfo for %s. Error: %s %s" % (inputName,
                                                                     row.get('code'),
                                                                     row.get('error')))
            continue
        rows = json.loads(row['data'])
        for item in rows:
            eventsLumis[inputName] = item
    return eventsLumis
def getEventsLumis(dataset, dbsUrl, blocks=None, eventsLumis=None):
    """
    Helper function to return number of events/lumis for given dataset or blocks.

    :param dataset: dataset name
    :param dbsUrl: string with the DBS URL
    :param blocks: optional list of block names; when given, events/lumis are
        summed over those blocks instead of the whole dataset
    :param eventsLumis: optional cache dict of name -> filesummaries record;
        updated in place with any records fetched from DBS
    :return: tuple with (number of events, number of lumis)
    """
    nevts = nlumis = 0
    # BUGFIX: the original crashed with a TypeError when `blocks` was provided
    # but `eventsLumis` was left as None.
    if eventsLumis is None:
        eventsLumis = {}
    if blocks:
        missingBlocks = [b for b in blocks if b not in eventsLumis]
        if missingBlocks:
            eventsLumis.update(eventsLumisInfo(missingBlocks, dbsUrl))
        for block in blocks:
            blockInfo = eventsLumis[block]
            nevts += blockInfo['num_event']
            nlumis += blockInfo['num_lumi']
        return nevts, nlumis
    if dataset in eventsLumis:
        dsetInfo = eventsLumis[dataset]
        return dsetInfo['num_event'], dsetInfo['num_lumi']
    # not cached: fetch from DBS, defaulting to zero counts on failure
    dsetInfo = eventsLumisInfo([dataset], dbsUrl).get(dataset, {'num_event': 0, 'num_lumi': 0})
    return dsetInfo['num_event'], dsetInfo['num_lumi']
def getComputingTime(workflow, eventsLumis=None, unit='h', dbsUrl=None, logger=None):
    """
    Return the estimated computing time for the given workflow.

    :param workflow: dictionary with the workflow/request description
    :param eventsLumis: optional cache dict passed through to getEventsLumis
    :param unit: time unit of the result: 'm', 'h' or 'd' (any other value
        leaves the raw events*TimePerEvent figure unconverted)
    :param dbsUrl: string with the DBS URL, needed when input datasets are used
    :param logger: optional logger object
    :return: the estimated computing time in the requested unit
    """
    logger = getMSLogger(verbose=True, logger=logger)
    cput = None
    if 'InputDataset' in workflow:
        # processing workflow: events come from the input dataset, optionally
        # restricted to the block white-list
        dataset = workflow['InputDataset']
        if 'BlockWhitelist' in workflow and workflow['BlockWhitelist']:
            nevts, _ = getEventsLumis(dataset, dbsUrl, workflow['BlockWhitelist'], eventsLumis)
        else:
            nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
        tpe = workflow['TimePerEvent']
        cput = nevts * tpe
    elif 'Chain' in workflow['RequestType']:
        # Step/TaskChain workflow: walk every TaskN/StepN dictionary and
        # accumulate events per task, carrying event counts (corrected by
        # filter efficiency) forward to tasks consuming a previous output
        base = workflow['RequestType'].replace('Chain', '')
        itask = 1
        cput = 0
        carryOn = {}
        while True:
            t = '%s%d' % (base, itask)
            itask += 1
            if t in workflow:
                task = workflow[t]
                if 'InputDataset' in task:
                    dataset = task['InputDataset']
                    if 'BlockWhitelist' in task and task['BlockWhitelist']:
                        nevts, _ = getEventsLumis(dataset, dbsUrl, task['BlockWhitelist'], eventsLumis)
                    else:
                        nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
                elif 'Input%s' % base in task:
                    # this task consumes the output of a previous task
                    nevts = carryOn[task['Input%s' % base]]
                elif 'RequestNumEvents' in task:
                    nevts = float(task['RequestNumEvents'])
                else:
                    logger.debug("this is not supported, making it zero cput")
                    nevts = 0
                tpe = task.get('TimePerEvent', 1)
                carryOn[task['%sName' % base]] = nevts
                if 'FilterEfficiency' in task:
                    carryOn[task['%sName' % base]] *= task['FilterEfficiency']
                cput += tpe * nevts
            else:
                break
    else:
        # generation workflow: requested events corrected by filter efficiency
        nevts = float(workflow.get('RequestNumEvents', 0))
        feff = float(workflow.get('FilterEfficiency', 1))
        tpe = workflow.get('TimePerEvent', 1)
        cput = nevts / feff * tpe
    if cput is None:
        return 0
    # convert from seconds to the requested unit
    if unit == 'm':
        cput = cput / (60.)
    if unit == 'h':
        cput = cput / (60. * 60.)
    if unit == 'd':
        cput = cput / (60. * 60. * 24.)
    return cput
def sigmoid(x):
    """Logistic sigmoid: maps any real number into the open (0, 1) interval."""
    exponential = math.exp(-x)
    return 1. / (1 + exponential)
def getNCopies(cpuHours, minN=2, maxN=3, weight=50000, constant=100000):
    "Calculate number of copies for given workflow"
    # Anchor the sigmoid at -constant/weight so the result ramps from minN up
    # to maxN as the CPU-hours grow past the `constant` threshold.
    anchor = sigmoid(-constant / weight)
    scale = (maxN - minN) / (1 - anchor)
    offset = (anchor * maxN - minN) / (anchor - 1)
    return int(offset + scale * sigmoid((cpuHours - constant) / weight))
def teraBytes(size):
    "Return size in TB (Terabytes)"
    tera = 1000 ** 4
    return size / tera
def gigaBytes(size):
    "Return size in GB (Gigabytes), rounded to 2 digits"
    giga = 1000 ** 3
    return round(size / giga, 2)
def elapsedTime(time0, msg='Elapsed time', ndigits=1):
    "Helper function to return elapsed time message"
    delta = round(time.time() - time0, ndigits)
    return "%s: %s sec" % (msg, delta)
def getRequest(url, params):
    """
    Helper function to GET data from given URL.

    :param url: string with the URL to fetch
    :param params: dict of query parameters; a 'verbose' key, if present, is
        popped and used as the request verbosity (NOTE: the caller's dict is
        mutated)
    :return: the raw response data from the request handler
    """
    mgr = RequestHandler()
    headers = {'Accept': 'application/json'}
    verbose = 0
    if 'verbose' in params:
        verbose = params['verbose']
        del params['verbose']
    data = mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(), verbose=verbose)
    return data
def postRequest(url, params):
    """
    Helper function to POST request to given URL.

    :param url: string with the URL to post to
    :param params: dict of payload parameters; a 'verbose' key, if present, is
        popped and used as the request verbosity (NOTE: the caller's dict is
        mutated)
    :return: the raw response data from the request handler
    """
    mgr = RequestHandler()
    headers = {'Accept': 'application/json'}
    verbose = 0
    if 'verbose' in params:
        verbose = params['verbose']
        del params['verbose']
    data = mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(),
                       verb='POST', verbose=verbose)
    return data
def getIO(request, dbsUrl):
    """
    Get input/output info about given request.

    :param request: dictionary with the request/workflow description
    :param dbsUrl: string with the DBS URL
    :return: tuple (lhe flag, primary, parent and secondary dataset sets),
        aggregated over every TaskN/StepN for chained request types
    """
    lhe = False
    primary = set()
    parent = set()
    secondary = set()
    if 'Chain' in request['RequestType']:
        # chained request: merge the IO of every TaskN/StepN dictionary
        base = request['RequestType'].replace('Chain', '')
        item = 1
        while '%s%d' % (base, item) in request:
            alhe, aprimary, aparent, asecondary = \
                ioForTask(request['%s%d' % (base, item)], dbsUrl)
            if alhe:
                lhe = True
            primary.update(aprimary)
            parent.update(aparent)
            secondary.update(asecondary)
            item += 1
    else:
        lhe, primary, parent, secondary = ioForTask(request, dbsUrl)
    return lhe, primary, parent, secondary
def ioForTask(request, dbsUrl):
    "Return lfn, primary, parent and secondary datasets for given request"
    lhe = False
    primary = set()
    parent = set()
    secondary = set()
    if 'InputDataset' in request:
        inputDsets = request['InputDataset']
        if not isinstance(inputDsets, list):
            inputDsets = [inputDsets]
        # drop empty entries
        primary = {dset for dset in inputDsets if dset}
    if primary and request.get('IncludeParent'):
        parent = findParent(primary, dbsUrl)
    if 'MCPileup' in request:
        pileups = request['MCPileup']
        if not isinstance(pileups, list):
            pileups = [pileups]
        secondary = {pu for pu in pileups if pu}
    if request.get('LheInputFiles') in ['True', True]:
        lhe = True
    return lhe, primary, parent, secondary
def findParent(datasets, dbsUrl):
    """
    Helper function to find the parent dataset.
    It returns a dictionary key'ed by the child dataset.

    :param datasets: list (or set) of child dataset names
    :param dbsUrl: string with the DBS URL
    :return: dictionary of child dataset -> parent dataset name
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    parentByDset = {}
    if not datasets:
        return parentByDset
    urls = ['%s/datasetparents?dataset=%s' % (dbsUrl, d) for d in datasets]
    logging.info("Executing %d requests against DBS 'datasetparents' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # the dataset name is the value of the last URL parameter
        dataset = row['url'].split('=')[-1]
        if hasHTTPFailed(row):
            print("Failure in findParent for dataset %s. Error: %s %s" % (dataset,
                                                                          row.get('code'),
                                                                          row.get('error')))
            parentByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        try:
            for item in rows:
                parentByDset[item['this_dataset']] = item['parent_dataset']
        except Exception as exc:
            print("Failure in findParent for dataset %s. Error: %s" % (dataset, str(exc)))
            parentByDset[dataset] = None
    return parentByDset | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/MicroService/Tools/Common.py | 0.69181 | 0.217982 | Common.py | pypi
from __future__ import print_function, division, absolute_import
from builtins import str
from future.utils import viewitems
from future import standard_library
standard_library.install_aliases()
import datetime
import json
import logging
import re
from urllib.parse import quote, unquote
from Utils.CertTools import cert, ckey
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
### Amount of days that we wait for stuck rules to be sorted
### After that, the rule is not considered and a new rule is created
STUCK_LIMIT = 7 # 7 days
def parseNewLineJson(stream):
    """
    Parse newline delimited json streaming data
    """
    # skip blank lines (e.g. the trailing newline) and decode the rest
    for record in filter(None, stream.split("\n")):
        yield json.loads(record)
def stringDateToEpoch(strDate):
    """
    Given a date/time in the format of:
        'Thu, 29 Apr 2021 13:15:42 UTC'
    it returns an integer with the equivalent EPOCH time

    :param strDate: a string with the date and time (expected to be in UTC)
    :return: the equivalent EPOCH time (integer)
    """
    import calendar
    timestamp = datetime.datetime.strptime(strDate, "%a, %d %b %Y %H:%M:%S %Z")
    # BUGFIX: calendar.timegm interprets the naive time tuple as UTC, matching
    # the UTC input; the previous strftime('%s') was non-portable and silently
    # applied the host's local timezone.
    return calendar.timegm(timestamp.timetuple())
def getRucioToken(rucioAuthUrl, rucioAcct):
    """
    Provided a Rucio account, fetch a token from the authentication server.

    :param rucioAuthUrl: url to the rucio authentication server
    :param rucioAcct: rucio account to be used
    :return: tuple with the token string and its expiration time (EPOCH
        integer); raises RuntimeError when authentication does not succeed
    """
    params = {}
    headers = {"X-Rucio-Account": rucioAcct}
    url = '%s/auth/x509' % rucioAuthUrl
    logging.info("Requesting a token to Rucio for account: %s, against url: %s", rucioAcct, rucioAuthUrl)
    mgr = RequestHandler()
    res = mgr.getheader(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    if res.getReason() == "OK":
        # token and expiration are returned as HTTP response headers
        userToken = res.getHeaderKey('X-Rucio-Auth-Token')
        tokenExpiration = res.getHeaderKey('X-Rucio-Auth-Token-Expires')
        logging.info("Retrieved Rucio token valid until: %s", tokenExpiration)
        # convert the human readable expiration time to EPOCH time
        tokenExpiration = stringDateToEpoch(tokenExpiration)
        return userToken, tokenExpiration
    raise RuntimeError("Failed to acquire a Rucio token. Error: {}".format(res.getReason()))
def renewRucioToken(rucioAuthUrl, userToken):
    """
    Provided a user Rucio token, check its lifetime and extend it by another hour.

    :param rucioAuthUrl: url to the rucio authentication server
    :param userToken: string with the Rucio token to be renewed
    :return: the new token lifetime, as reported by the server
    """
    params = {}
    headers = {"X-Rucio-Auth-Token": userToken}
    url = '%s/auth/validate' % rucioAuthUrl
    logging.info("Renewing the Rucio token...")
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    try:
        # SECURITY NOTE(review): eval() on a server response is dangerous; the
        # payload appears to be a Python-literal dict, so ast.literal_eval
        # would likely be a safer drop-in — flagged, not changed, to preserve
        # behavior.
        newExpiration = eval(res)['lifetime']
    except Exception as exc:
        raise RuntimeError("Failed to renew Rucio token. Response: {} Error: {}".format(res, str(exc)))
    return newExpiration
def getPileupContainerSizesRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of containers, find their total size in Rucio

    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a flat dictionary of container and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE: Rucio version of getPileupDatasetSizes()
    """
    sizeByDset = {}
    if not containers:
        return sizeByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/{}?dynamic=anything'.format(rucioUrl, scope, cont) for cont in containers]
    logging.info("Executing %d requests against Rucio for the container size", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the container name from the request URL
        container = row['url'].split('/dids/{}/'.format(scope))[1]
        container = container.replace("?dynamic=anything", "")
        if row['data'] is None:
            msg = "Failure in getPileupContainerSizesRucio for container {}. Response: {}".format(container, row)
            logging.error(msg)
            sizeByDset.setdefault(container, None)
            continue
        response = json.loads(row['data'])
        try:
            # the DID metadata carries the total size under 'bytes'
            sizeByDset.setdefault(container, response['bytes'])
        except KeyError:
            msg = "getPileupContainerSizesRucio function did not return a valid response for container: %s. Error: %s"
            logging.error(msg, container, response)
            sizeByDset.setdefault(container, None)
            continue
    return sizeByDset
def listReplicationRules(containers, rucioAccount, grouping,
                         rucioUrl, rucioToken, scope="cms"):
    """
    List all the replication rules for the input filters provided.
    It builds a dictionary of container name and the locations where
    they have a rule locking data on, with some additional rule state
    logic in the code.

    :param containers: list of container names
    :param rucioAccount: string with the rucio account
    :param grouping: rule grouping string, only "A" or "D" are allowed
    :param rucioUrl: string with the Rucio url
    :param rucioToken: string with the Rucio token
    :param scope: string with the data scope
    :return: a flat dictionary key'ed by the container name, with a list of RSE
        expressions that still need to be resolved
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE-2: Available rule states can be found at:
    https://github.com/rucio/rucio/blob/16f39dffa1608caa0a1af8bbc0fcff2965dccc50/lib/rucio/db/sqla/constants.py#L180
    """
    locationByContainer = {}
    if not containers:
        return locationByContainer
    if grouping not in ["A", "D"]:
        raise RuntimeError("Replication rule grouping value provided ({}) is not allowed!".format(grouping))
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = []
    for cont in containers:
        urls.append('{}/rules/?scope={}&account={}&grouping={}&name={}'.format(rucioUrl, scope, rucioAccount,
                                                                               grouping, quote(cont, safe="")))
    logging.info("Executing %d requests against Rucio to list replication rules", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the container name from the request URL
        container = unquote(row['url'].split("name=")[1])
        if "200 OK" not in row['headers']:
            msg = "Failure in listReplicationRules for container {}. Response: {}".format(container, row)
            logging.error(msg)
            locationByContainer.setdefault(container, None)
            continue
        try:
            locationByContainer.setdefault(container, [])
            # the response is newline-delimited JSON, one rule per line
            for item in parseNewLineJson(row['data']):
                # suspended/replicating/injected rules are skipped so a new
                # rule will be created for the container
                if item['state'] in ["U", "SUSPENDED", "R", "REPLICATING", "I", "INJECT"]:
                    msg = "Container %s has a rule ID %s in state %s. Will try to create a new rule."
                    logging.warning(msg, container, item['id'], item['state'])
                    continue
                elif item['state'] in ["S", "STUCK"]:
                    # stuck with no sources at all: nothing we can do about it
                    if item['error'] == 'NO_SOURCES:NO_SOURCES':
                        msg = "Container {} has a STUCK rule with NO_SOURCES.".format(container)
                        msg += " Data could be lost forever... Rule info is: {}".format(item)
                        logging.warning(msg)
                        continue
                    # then calculate for how long it's been stuck
                    utcTimeNow = int(datetime.datetime.utcnow().strftime('%s'))
                    if item['stuck_at']:
                        stuckAt = stringDateToEpoch(item['stuck_at'])
                    else:
                        # consider it to be stuck since its creation
                        stuckAt = stringDateToEpoch(item['created_at'])
                    daysStuck = (utcTimeNow - stuckAt) // (24 * 60 * 60)
                    # rules stuck beyond STUCK_LIMIT days are ignored
                    if daysStuck > STUCK_LIMIT:
                        msg = "Container {} has a STUCK rule for {} days (limit set to: {}).".format(container,
                                                                                                     daysStuck,
                                                                                                     STUCK_LIMIT)
                        msg += " Not going to use it! Rule info: {}".format(item)
                        logging.warning(msg)
                        continue
                    else:
                        msg = "Container {} has a STUCK rule for only {} days.".format(container, daysStuck)
                        msg += " Considering it for the pileup location"
                        logging.info(msg)
                else:
                    logging.info("Container %s has rule ID %s in state %s, using it.",
                                 container, item['id'], item['state'])
                ### NOTE: this is not an RSE name, but an RSE expression that still needs to be resolved
                locationByContainer[container].append(item['rse_expression'])
        except Exception as exc:
            msg = "listReplicationRules function did not return a valid response for container: %s."
            msg += "Server responded with: %s\nError: %s"
            logging.exception(msg, container, str(exc), row['data'])
            locationByContainer.setdefault(container, None)
            continue
    return locationByContainer
def getPileupSubscriptionsRucio(datasets, rucioUrl, rucioToken, scope="cms"):
    """
    Provided a list of datasets, find dataset level subscriptions where it's
    as complete as `percent_min`.

    :param datasets: list of dataset names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary of datasets and a list of their location.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    # FIXME: we should definitely make a feature request to Rucio...
    # so much, just to get the final RSEs for a container!!!
    locationByDset = {}
    if not datasets:
        return locationByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # first, resolve the dataset into blocks
    blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope)
    urls = []
    for _dset, blocks in viewitems(blocksByDset):
        if blocks:
            for block in blocks:
                urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
    # this is going to be bloody expensive in terms of HTTP requests
    logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the block name from the request URL
        block = row['url'].split("/{}/".format(scope))[1]
        block = unquote(re.sub("/datasets$", "", block, 1))
        container = block.split("#")[0]
        locationByDset.setdefault(container, set())
        if row['data'] is None:
            msg = "Failure in getPileupSubscriptionsRucio container {} and block {}.".format(container, block)
            msg += " Response: {}".format(row)
            logging.error(msg)
            locationByDset[container] = None
            continue
        if locationByDset[container] is None:
            # then one of the block requests failed, skip the whole dataset
            continue
        thisBlockRSEs = set()
        for item in parseNewLineJson(row['data']):
            if item['state'] == "AVAILABLE":
                thisBlockRSEs.add(item["rse"])
        logging.info("Block: %s is available at: %s", block, thisBlockRSEs)
        # now we have the final block location
        if not locationByDset[container]:
            # then this is the first block of this dataset
            locationByDset[container] = thisBlockRSEs
        else:
            # otherwise, make an intersection of them
            locationByDset[container] = locationByDset[container] & thisBlockRSEs
    return locationByDset
def getBlocksAndSizeRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Resolve each container into its blocks and collect the block sizes.
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary in the form of:
        {"dataset": {"block": {"blockSize": 111, "locations": []}}}
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE2: meant to return an output similar to Common.getBlockReplicasAndSize
    """
    contBlockSize = {}
    if not containers:
        return contBlockSize
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # one DIDs search query per container, matching any of its blocks ("#*")
    urls = ['{}/dids/{}/dids/search?type=dataset&long=True&name={}'.format(rucioUrl, scope, quote(cont + "#*"))
            for cont in containers]
    logging.info("Executing %d requests against Rucio DIDs search API for containers", len(urls))
    for row in multi_getdata(urls, ckey(), cert(), headers=headers):
        # recover the container name from the request URL
        contName = unquote(row['url'].split("name=")[1]).replace("#*", "")
        contBlockSize.setdefault(contName, {})
        if row['data'] in [None, ""]:
            # flag the whole container as failed
            logging.error("Failure in getBlocksAndSizeRucio function for container {}. Response: {}".format(contName, row))
            contBlockSize[contName] = None
            continue
        for blockRec in parseNewLineJson(row['data']):
            # NOTE: we do not care about primary block location in Rucio
            contBlockSize[contName][blockRec['name']] = {"blockSize": blockRec['bytes'], "locations": []}
    return contBlockSize
### NOTE: likely not going to be used for a while
def getContainerBlocksRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Provided a list of containers, find all their blocks.
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary key'ed by the datasets with a list of blocks.
    NOTE: a dataset with no blocks (or whose request returned no data) is
    mapped to an empty list.
    """
    blocksByDset = {}
    if not containers:
        return blocksByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/{}/dids'.format(rucioUrl, scope, cont) for cont in containers]
    logging.info("Executing %d requests against Rucio DIDs API for blocks in containers", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the container name from the request URL
        container = row['url'].split("/{}/".format(scope))[1]
        container = re.sub("/dids$", "", container, 1)
        blocksByDset.setdefault(container, [])
        if not row['data']:
            # bug fix: skip payload parsing when there is no data; previously
            # an empty/None payload was still handed over to parseNewLineJson
            logging.warning("Dataset: %s has no blocks in Rucio", container)
            continue
        for item in parseNewLineJson(row['data']):
            blocksByDset[container].append(item["name"])
    return blocksByDset
### NOTE: likely not going to be used for a while
def getBlockReplicasAndSizeRucio(datasets, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of datasets, find all their blocks with replicas
    available.
    :param datasets: list of dataset names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    dsetBlockSize = {}
    if not datasets:
        return dsetBlockSize
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # first, figure out their block names
    blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope=scope)
    # build one replicas URL per block of every dataset
    urls = []
    for _dset, blocks in viewitems(blocksByDset):
        for block in blocks:
            urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
    # next, query the replicas API for the block location
    # this is going to be bloody expensive in terms of HTTP requests
    logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the block name from the request URL: strip the scope
        # prefix and the trailing "/datasets" suffix, then URL-decode
        block = row['url'].split("/{}/".format(scope))[1]
        block = unquote(re.sub("/datasets$", "", block, 1))
        container = block.split("#")[0]
        dsetBlockSize.setdefault(container, dict())
        if row['data'] is None:
            msg = "Failure in getBlockReplicasAndSizeRucio for container {} and block {}.".format(container, block)
            msg += " Response: {}".format(row)
            logging.error(msg)
            # a single failed block invalidates the whole dataset record,
            # even if other blocks were already collected for it
            dsetBlockSize[container] = None
            continue
        if dsetBlockSize[container] is None:
            # then one of the block requests failed, skip the whole dataset
            continue
        thisBlockRSEs = []
        blockBytes = 0
        for item in parseNewLineJson(row['data']):
            # NOTE(review): blockBytes keeps only the value from the last
            # replica record -- presumably 'bytes' is identical across all
            # replicas of the same block; confirm against the Rucio API
            blockBytes = item['bytes']
            if item['state'] == "AVAILABLE":
                thisBlockRSEs.append(item["rse"])
        # now we have the final block location
        if not blockBytes and not thisBlockRSEs:
            logging.warning("Block: %s has no replicas and no size", block)
        else:
            dsetBlockSize[container][block] = {"locations": thisBlockRSEs, "blockSize": blockBytes}
    return dsetBlockSize
from __future__ import print_function, division
# system modules
import os
import re
# WMCore modules
from WMCore.REST.Server import RESTFrontPage
class FrontPage(RESTFrontPage):
    """MicroService front page.
    MicroService provides only one web page, the front page. The page just
    loads the javascript user interface, complete with CSS and all JS
    code embedded into it.
    The JavaScript code performs all the app functionality via the REST
    interface defined by the :class:`~.Data` class.
    """
    def __init__(self, app, config, mount):
        """
        :arg app: reference to the application object.
        :arg config: reference to the configuration.
        :arg str mount: URL mount point."""
        mainroot = 'microservice' # entry point in access URL
        # static content root: taken from the environment if set, otherwise
        # derived from this file's location below
        wpath = os.getenv('MS_STATIC_ROOT', '')
        # NOTE(review): debug print; consider switching to a logger
        print(wpath)
        if not wpath:
            # walk 5 path components up from this module to the install root
            content = os.path.abspath(__file__).rsplit('/', 5)[0]
            # installations served from an "/xlib/" tree keep data under "xdata/"
            xlib = (__file__.find("/xlib/") >= 0 and "x") or ""
            wpath = "%s/%sdata/" % (content, xlib)
        if not wpath.endswith('/'):
            wpath += '/'
        print(self.__class__.__name__, "static content: %s" % wpath)
        # per-content-type roots; each "rx" regex whitelists the file names
        # that may be served from that root
        mdict = {"root": wpath, "rx": re.compile(r"^[a-z]+/[-a-z0-9]+\.(?:html)$")}
        tdict = {"root": wpath + "templates/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:html|tmpl)$")}
        jdict = {"root": wpath + "js/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:js)$")}
        cdict = {"root": wpath + "css/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\..*(?:css)$")}
        idict = {"root": wpath + "images/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:png|gif|jpg)$")}
        roots = {mainroot: mdict, "templates": tdict,
                 "js": jdict, "css": cdict, "images": idict}
        # location of frontpage in the root, e.g. microservice
        frontpage = "%s/templates/index.html" % mainroot
        RESTFrontPage.__init__(self, app, config, mount, frontpage, roots)
from threading import current_thread
# WMCore modules
from WMCore.MicroService.MSCore.MSAuth import MSAuth
from WMCore.MicroService.MSCore.MSCore import MSCore
from WMCore.MicroService.DataStructs.DefaultStructs import PILEUP_REPORT
from WMCore.MicroService.MSPileup.MSPileupData import MSPileupData
class MSPileup(MSCore):
    """
    MSPileup provides whole logic behind the pileup WMCore module.
    It relies on MSPileupData for the persistence layer and on MSAuth
    for per-API authorization checks.
    """

    def __init__(self, msConfig, **kwargs):
        """
        :param msConfig: micro service configuration dictionary
        """
        super().__init__(msConfig)
        self.dataMgr = MSPileupData(msConfig)
        self.authMgr = MSAuth(msConfig)
        # Get the RSE expression for Disk RSEs from the configuration
        self.diskRSEExpr = msConfig.get("rucioDiskExpression", "")

    def status(self):
        """
        Provide MSPileup status API. We should extend it to check DB connection, etc.
        :return: status dictionary
        """
        summary = dict(PILEUP_REPORT)
        summary.update({'thread_id': current_thread().name})
        return summary

    def getPileup(self, **kwargs):
        """
        MSPileup get API fetches the data from underlying database layer
        :param **kwargs: provide key=value (or spec) input
        :return: results of MSPileup data layer (list of dicts)
        """
        spec = {}  # (get all full docs) retrieve a list with the full documentation of all the pileups
        if 'query' in kwargs:
            # use specific spec (JSON query)
            spec = kwargs['query']
        elif 'pileupName' in kwargs:
            # get docs for given pileupName
            spec = {'pileupName': kwargs['pileupName']}
        elif 'campaign' in kwargs:
            # get docs for given campaign
            spec = {'campaign': kwargs['campaign']}
        else:
            # NOTE(review): this branch copies every kwarg into the spec,
            # including 'filters' when present -- confirm that is intended
            for key, val in kwargs.items():
                spec[key] = val
        # check if filters are present and use it as projection fields
        projection = {}
        for key in kwargs.get('filters', []):
            projection[key] = 1
        results = self.dataMgr.getPileup(spec, projection)
        return results

    def queryDatabase(self, query, projection=None):
        """
        MSPileup query database API querying the data in underlying data layer.
        :param query: provide query JSON spec to MSPileup data layer
        :param projection: optional projection dictionary (field name -> 1),
            extended with any fields listed in query['filters']
        :return: results of MSPileup data layer (list of dicts)
        """
        spec = {}  # (get all full docs) retrieve a list with the full documentation of all the pileups
        if 'query' in query:
            # use specific spec (JSON query)
            spec = query['query']
        # bug fix: honor a caller-provided projection instead of silently
        # discarding it; copy it so the caller's dictionary is not mutated
        projection = dict(projection) if projection else {}
        # check if filters are present and use them as projection fields
        for key in query.get('filters', []):
            projection[key] = 1
        return self.dataMgr.getPileup(spec, projection)

    def createPileup(self, pdict):
        """
        MSPileup create pileup API to create appropriate pileup document
        in underlying database.
        :param pdict: input MSPileup data dictionary
        :return: results of MSPileup data layer (list of dicts)
        """
        self.authMgr.authorizeApiAccess('ms-pileup', 'create')
        # resolve the disk RSE expression into concrete RSE names used to
        # validate the document's RSE lists
        rseNames = self.rucio.evaluateRSEExpression(self.diskRSEExpr, useCache=True)
        return self.dataMgr.createPileup(pdict, rseNames)

    def updatePileup(self, pdict):
        """
        MSPileup update API to update corresponding pileup document in data layer.
        :param pdict: input MSPileup data dictionary
        :return: results of MSPileup data layer (list of dicts)
        """
        self.authMgr.authorizeApiAccess('ms-pileup', 'update')
        rseNames = self.rucio.evaluateRSEExpression(self.diskRSEExpr, useCache=True)
        return self.dataMgr.updatePileup(pdict, rseNames, validate=True)

    def deletePileup(self, spec):
        """
        MSPileup delete API to delete corresponding pileup document in data layer.
        :param spec: input MSPileup query spec dictionary
        :return: results of MSPileup data layer (list of dicts)
        """
        self.authMgr.authorizeApiAccess('ms-pileup', 'delete')
        return self.dataMgr.deletePileup(spec)
import time
# WMCore modules
from WMCore.MicroService.Tools.Common import getMSLogger
# CMSMonitoring modules
from CMSMonitoring.StompAMQ7 import StompAMQ7 as StompAMQ
def flatDocuments(doc):
    """
    Flatten a single MSPileup document into one document per
    (campaign, currentRSE, expectedRSE) combination.
    :param doc: input MSPileup document
    :return: generator of flattened MSPileup documents
    """
    for campaignDoc in flatKey(doc, 'campaigns'):
        for currentDoc in flatKey(campaignDoc, 'currentRSEs'):
            for flatDoc in flatKey(currentDoc, 'expectedRSEs'):
                yield flatDoc


def flatKey(doc, key):
    """
    Expand the list stored under `key` into one document per list item,
    re-keyed under the singular form of the key (e.g. campaigns -> campaign).
    :param doc: input MSPileup document
    :param key: document key whose list value should be expanded
    :return: generator of documents, one per item of doc[key]
    """
    # convert plural to singular key name, e.g. campaigns -> campaign
    singular = key[:-1]
    for value in doc[key]:
        flattened = dict(doc)
        flattened[singular] = value
        del flattened[key]
        yield flattened
class MSPileupMonitoring():
    """
    MSPileupMonitoring represents MSPileup monitoring class
    """
    def __init__(self, msConfig=None):
        """
        Constructor for MSPileupMonitoring
        :param msConfig: micro service configuration dictionary carrying the
            AMQ credentials/endpoint and an optional logger
        """
        # NOTE(review): the msConfig=None default would raise on .get();
        # callers appear to always pass a dict -- confirm
        # AMQ credentials; when user/password are missing, uploadToAMQ is a no-op
        self.userAMQ = msConfig.get('user_amq', None)
        self.passAMQ = msConfig.get('pass_amq', None)
        self.topicAMQ = msConfig.get('topic_amq', None)
        self.docTypeAMQ = msConfig.get('doc_type_amq', 'cms-ms-pileup')
        self.hostPortAMQ = msConfig.get('host_port_amq', None)
        self.producer = msConfig.get('producer', 'cms-ms-pileup')
        self.logger = msConfig.get('logger', getMSLogger(False))
    def uploadToAMQ(self, docs, producer=None):
        """
        _uploadToAMQ_
        Sends data to AMQ, which ends up in elastic search.
        :param docs: list of documents/dicts to be posted
        :param producer: service name that's providing this info
        :return: {} or {"success": ndocs, "failures": nfailures}
        """
        if not docs:
            self.logger.info("There are no documents to send to AMQ")
            return {}
        if not self.userAMQ or not self.passAMQ:
            self.logger.info("MSPileupMonitoring has no AMQ credentials, will skip the upload to MONIT")
            return {}
        producer = producer or self.producer
        # a single timestamp is shared by every notification of this batch
        ts = int(time.time())
        notifications = []
        self.logger.debug("Sending %d to AMQ", len(docs))
        try:
            stompSvc = StompAMQ(username=self.userAMQ,
                                password=self.passAMQ,
                                producer=producer,
                                topic=self.topicAMQ,
                                validation_schema=None,
                                host_and_ports=self.hostPortAMQ,
                                logger=self.logger)
            for doc in docs:
                singleNotif, _, _ = stompSvc.make_notification(payload=doc, doc_type=self.docTypeAMQ,
                                                               ts=ts, data_subfield="payload")
                notifications.append(singleNotif)
            # send() returns the subset of notifications that failed to upload
            failures = stompSvc.send(notifications)
            msg = "%i out of %i documents successfully sent to AMQ" % (len(notifications) - len(failures),
                                                                       len(notifications))
            self.logger.info(msg)
            return {"success": len(notifications) - len(failures), "failures": len(failures)}
        except Exception as ex:
            # best-effort upload: log the error and fall through to return {}
            self.logger.exception("Failed to send data to StompAMQ. Error %s", str(ex))
            return {}
import json
# WMCore modules
from Utils.Timers import gmtimeSeconds
from WMCore.MicroService.Tools.Common import getMSLogger
from WMCore.Lexicon import dataset
class MSPileupObj():
    """
    MSPileupObj defines MSPileup data stucture
    """
    def __init__(self, pdict, verbose=None, logger=None, validRSEs=None):
        """
        Build and validate a pileup data record.
        :param pdict: input dictionary with (a subset of) the schema() keys;
            'pileupName', 'pileupType', 'expectedRSEs' and 'active' are
            mandatory (KeyError if missing), the rest falls back to defaults
        :param verbose: verbosity flag passed to getMSLogger
        :param logger: optional logger object
        :param validRSEs: list of RSE names accepted for this document
        :raises Exception: if the resulting document fails validation
        """
        self.logger = getMSLogger(verbose, logger)
        if not validRSEs:
            validRSEs = []
        self.validRSEs = validRSEs
        self.data = {
            'pileupName': pdict['pileupName'],
            'pileupType': pdict['pileupType'],
            'insertTime': pdict.get('insertTime', gmtimeSeconds()),
            'lastUpdateTime': pdict.get('lastUpdateTime', gmtimeSeconds()),
            'expectedRSEs': pdict['expectedRSEs'],
            'currentRSEs': pdict.get('currentRSEs', []),
            'fullReplicas': pdict.get('fullReplicas', 1),
            'campaigns': pdict.get('campaigns', []),
            'containerFraction': pdict.get('containerFraction', 1.0),
            'replicationGrouping': pdict.get('replicationGrouping', 'ALL'),
            'activatedOn': pdict.get('activatedOn', gmtimeSeconds()),
            'deactivatedOn': pdict.get('deactivatedOn', gmtimeSeconds()),
            'active': pdict['active'],
            'pileupSize': pdict.get('pileupSize', 0),
            'ruleIds': pdict.get('ruleIds', [])}
        valid, msg = self.validate(self.data)
        if not valid:
            msg = f'MSPileup input is invalid, {msg}'
            raise Exception(msg)
    def __str__(self):
        """
        Return human readable representation of pileup data
        """
        return json.dumps(self.data, indent=4)
    def getPileupData(self):
        """
        Get pileup data
        :return: the internal pileup data dictionary
        """
        return self.data
    def validate(self, pdict=None):
        """
        Validate data according to its schema. If data is not provided via
        input pdict parameter, the validate method will validate internal
        data object.
        :param pdict: input data dictionary (optional)
        :return: (boolean status, string message) result of validation
        """
        msg = ""
        if not pdict:
            pdict = self.data
        docSchema = schema()
        # the document must carry exactly the schema keys, no more and no less
        if set(pdict) != set(docSchema):
            pkeys = set(pdict.keys())
            skeys = set(docSchema.keys())
            msg = f"provided object {pkeys} keys are not equal to schema keys {skeys}"
            self.logger.error(msg)
            return False, msg
        for key, val in pdict.items():
            if key not in docSchema:
                msg = f"Failed to validate {key}, not found in {docSchema}"
                self.logger.error(msg)
                return False, msg
            _, stype = docSchema[key] # expected data type for our key
            if not isinstance(val, stype):
                dtype = str(type(val)) # obtained data type of our value
                msg = f"Failed to validate: {key}, expect data-type {stype} got type {dtype}"
                self.logger.error(msg)
                return False, msg
            # per-key value constraints below
            if key == 'pileupName':
                # pileup name must look like a valid CMS dataset name
                try:
                    dataset(val)
                except AssertionError:
                    msg = f"pileupName value {val} does not match dataset pattern"
                    self.logger.error(msg)
                    return False, msg
            if key == "pileupType" and val not in ['classic', 'premix']:
                msg = f"pileupType value {val} is neither of ['classic', 'premix']"
                self.logger.error(msg)
                return False, msg
            if key == 'replicationGrouping' and val not in ['DATASET', 'ALL']:
                msg = f"replicationGrouping value {val} is neither of ['DATASET', 'ALL']"
                self.logger.error(msg)
                return False, msg
            if key == 'containerFraction' and (val > 1 or val < 0):
                msg = f"containerFraction value {val} outside [0,1] range"
                self.logger.error(msg)
                return False, msg
            if key in ('expectedRSEs', 'currentRSEs') and not self.validateRSEs(val):
                msg = f"{key} value {val} is not in validRSEs {self.validRSEs}"
                self.logger.error(msg)
                return False, msg
            if key == 'expectedRSEs' and len(val) == 0:
                msg = 'document require non-empty list of expectedRSEs'
                self.logger.error(msg)
                return False, msg
        return True, msg
    def validateRSEs(self, rseList):
        """
        Validate given list of RSEs
        :param rseList: list of RSEs
        :return: boolean
        """
        # shortcut: a list equal to validRSEs is trivially valid
        if rseList == self.validRSEs:
            return True
        for rse in rseList:
            if rse not in self.validRSEs:
                return False
        return True
def schema():
    """
    Return the data schema for a record in MongoDB.
    It's a dictionary where:
    - key is schema attribute name
    - a value is a tuple of (default value, expected data type)
    :return: a dictionary
    """
    # defaults and expected types are enforced by MSPileupObj.validate()
    doc = {'pileupName': ('', str),
           'pileupType': ('', str),
           'insertTime': (0, int),
           'lastUpdateTime': (0, int),
           'expectedRSEs': ([], list),
           'currentRSEs': ([], list),
           'fullReplicas': (0, int),
           'campaigns': ([], list),
           'containerFraction': (1.0, float),
           'replicationGrouping': ('', str),
           'activatedOn': (0, int),
           'deactivatedOn': (0, int),
           'active': (False, bool),
           'pileupSize': (0, int),
           'ruleIds': ([], list)}
    return doc
from builtins import range
from WMCore.DataStructs.Run import Run
class Mask(dict):
    """
    _Mask_
    Run/lumi/event mask. An empty 'runAndLumis' dictionary means the mask
    is wide open: runLumiInMask() accepts everything and
    filterRunLumisByMask() passes runs through unchanged.
    """
    def __init__(self, **kwargs):
        """
        Initialize the mask keys; any kwargs are copied into the dict first,
        then missing keys get their defaults via setdefault.
        """
        dict.__init__(self, **kwargs)
        self.inclusive = True
        self.setdefault("inclusivemask", True)
        self.setdefault("FirstEvent", None)
        self.setdefault("LastEvent", None)
        self.setdefault("FirstLumi", None)
        self.setdefault("LastLumi", None)
        self.setdefault("FirstRun", None)
        self.setdefault("LastRun", None)
        self.setdefault("runAndLumis", {})
    def setMaxAndSkipEvents(self, maxEvents, skipEvents):
        """
        _setMaxAndSkipEvents_
        Set FirstEvent & LastEvent fields as max & skip events
        """
        self['FirstEvent'] = skipEvents
        # LastEvent stays None when maxEvents is None (no upper bound)
        if maxEvents is not None:
            self['LastEvent'] = skipEvents + maxEvents
        return
    def setMaxAndSkipLumis(self, maxLumis, skipLumi):
        """
        _setMaxAndSkipLumis
        Set the Maximum number of lumi sections and the starting point
        """
        self['FirstLumi'] = skipLumi
        self['LastLumi'] = skipLumi + maxLumis
        return
    def setMaxAndSkipRuns(self, maxRuns, skipRun):
        """
        _setMaxAndSkipRuns
        Set the Maximum number of runss and the starting point
        """
        self['FirstRun'] = skipRun
        self['LastRun'] = skipRun + maxRuns
        return
    def getMaxEvents(self):
        """
        _getMaxEvents_
        return maxevents setting
        """
        if self['LastEvent'] is None or self['FirstEvent'] is None:
            return None
        # NOTE(review): with setMaxAndSkipEvents(max, skip) this evaluates to
        # max + 1 -- confirm whether the +1 (inclusive-range) convention is
        # intended by the callers
        return self['LastEvent'] - self['FirstEvent'] + 1
    def getMax(self, keyType=None):
        """
        _getMax_
        returns the maximum number of runs/events/etc of the type of the type string
        """
        if 'First%s' % (keyType) not in self:
            return None
        if self['First%s' % (keyType)] is None or self['Last%s' % (keyType)] is None:
            return None
        # same inclusive-range (+1) convention as getMaxEvents
        return self['Last%s' % (keyType)] - self['First%s' % (keyType)] + 1
    def addRun(self, run):
        """
        _addRun_
        Add a run object: its (sorted) lumi list is compressed into
        consecutive [first, last] ranges before being stored.
        """
        run.lumis.sort()
        firstLumi = run.lumis[0]
        lastLumi = run.lumis[0]
        for lumi in run.lumis:
            if lumi <= lastLumi + 1:
                # still contiguous (or duplicate), extend the current range
                lastLumi = lumi
            else:
                # gap found: close the current range and start a new one
                self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
                firstLumi = lumi
                lastLumi = lumi
        self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
        return
    def addRunWithLumiRanges(self, run, lumiList):
        """
        _addRunWithLumiRanges_
        Add to runAndLumis with call signature
        addRunWithLumiRanges(run=run, lumiList = [[start1,end1], [start2, end2], ...]
        NOTE: this replaces any ranges previously stored for the run.
        """
        self['runAndLumis'][run] = lumiList
        return
    def addRunAndLumis(self, run, lumis=None):
        """
        _addRunAndLumis_
        Add runs and lumis directly
        TODO: The name of this function is a little misleading. If you pass a list of lumis
        it ignores the content of the list and adds a range based on the max/min in
        the list. Missing lumis in the list are ignored.
        NOTE: If the new run/lumi range overlaps with the pre-existing lumi ranges in the
        mask, no attempt is made to merge these together. This can result in a mask
        with duplicate lumis.
        """
        lumis = lumis or []
        if not isinstance(lumis, list):
            lumis = list(lumis)
        if run not in self['runAndLumis']:
            self['runAndLumis'][run] = []
        # only min/max of the passed lumis matter, see TODO above
        self['runAndLumis'][run].append([min(lumis), max(lumis)])
        return
    def getRunAndLumis(self):
        """
        _getRunAndLumis_
        Return list of active runs and lumis
        """
        return self['runAndLumis']
    def runLumiInMask(self, run, lumi):
        """
        _runLumiInMask_
        See if a particular runLumi is in the mask
        """
        if self['runAndLumis'] == {}:
            # Empty dictionary
            # ALWAYS TRUE
            return True
        if run not in self['runAndLumis']:
            return False
        for pair in self['runAndLumis'][run]:
            # Go through each max and min pair
            if pair[0] <= lumi and pair[1] >= lumi:
                # Then the lumi is bracketed
                return True
        return False
    def filterRunLumisByMask(self, runs):
        """
        _filterRunLumisByMask_
        Pass a Mask a list of run objects, get back a list of
        run objects that correspond to the actual mask allowed values
        """
        if self['runAndLumis'] == {}:
            # Empty dictionary
            # ALWAYS TRUE
            return runs
        # merge duplicate run numbers in the input into a single Run object
        runDict = {}
        for r in runs:
            if r.run in runDict:
                runDict[r.run].extendLumis(r.lumis)
            else:
                runDict[r.run] = r
        # only runs present in both the mask and the input survive
        maskRuns = set(self["runAndLumis"].keys())
        passedRuns = set([r.run for r in runs])
        filteredRuns = maskRuns.intersection(passedRuns)
        newRuns = set()
        for runNumber in filteredRuns:
            # expand the mask's [first, last] pairs into an explicit lumi set
            maskLumis = set()
            for pair in self["runAndLumis"][runNumber]:
                if pair[0] == pair[1]:
                    maskLumis.add(pair[0])
                else:
                    maskLumis = maskLumis.union(list(range(pair[0], pair[1] + 1)))
            filteredLumis = set(runDict[runNumber].lumis).intersection(maskLumis)
            if len(filteredLumis) > 0:
                # rebuild the Run carrying over the per-lumi event counts
                filteredLumiEvents = [(lumi, runDict[runNumber].getEventsByLumi(lumi)) for lumi in filteredLumis]
                newRuns.add(Run(runNumber, *filteredLumiEvents))
        return newRuns
from __future__ import absolute_import, division, print_function
from future.utils import listitems
import sys
import hashlib
import time
from functools import total_ordering
from Utils.Utilities import encodeUnicodeToBytes
from WMCore.DataStructs.WMObject import WMObject
@total_ordering
class WorkUnit(WMObject, dict):
    """
    _WorkUnit_
    Data object that contains details for a single work unit
    corresponding to tables workunit and frl_workunit_assoc
    """

    # fields copied verbatim when serializing to JSON
    fieldsToCopy = ['taskid', 'retry_count', 'last_unit_count', 'last_submit_time', 'status', 'firstevent',
                    'lastevent', 'fileid']
    # fields returned by getInfo(); 'run_lumi' holds a Run object
    fieldsForInfo = fieldsToCopy + ['run_lumi']

    def __init__(self, taskID=None, retryCount=0, lastUnitCount=None, lastSubmitTime=None,
                 status=0, firstEvent=1, lastEvent=sys.maxsize, fileid=None, runLumi=None):
        """
        Initialize the work unit record.
        :param runLumi: a WMCore.DataStructs.Run instance (run number plus lumis)
        """
        super(WorkUnit, self).__init__(self)
        # bug fix: the submit time used to default to int(time.time()) in the
        # signature, which is evaluated only once at import time; defaulting
        # here gives each instance its own construction timestamp
        if lastSubmitTime is None:
            lastSubmitTime = int(time.time())
        self.setdefault('taskid', taskID)
        self.setdefault('retry_count', retryCount)
        self.setdefault('last_unit_count', lastUnitCount)
        self.setdefault('last_submit_time', lastSubmitTime)
        self.setdefault('status', status)
        self.setdefault('firstevent', firstEvent)
        self.setdefault('lastevent', lastEvent)
        self.setdefault('fileid', fileid)
        self.setdefault('run_lumi', runLumi)

    def __lt__(self, rhs):
        """
        Compare work units in task id, run, lumi, first event, last event
        """
        if self['taskid'] != rhs['taskid']:
            return self['taskid'] < rhs['taskid']
        if self['run_lumi'].run != rhs['run_lumi'].run:
            return self['run_lumi'].run < rhs['run_lumi'].run
        if self['run_lumi'].lumis != rhs['run_lumi'].lumis:
            return self['run_lumi'].lumis < rhs['run_lumi'].lumis
        # bug fix: these values are stored under 'firstevent'/'lastevent';
        # the code used to look up 'first_event'/'last_event' and raised KeyError
        if self['firstevent'] != rhs['firstevent']:
            return self['firstevent'] < rhs['firstevent']
        return self['lastevent'] < rhs['lastevent']

    def __eq__(self, rhs):
        """
        Work unit is equal if it has the same task, run, and lumi
        """
        # bug fix: run and lumis used to be compared against self instead of
        # rhs, which made those two checks always true
        return (self['taskid'] == rhs['taskid'] and self['run_lumi'].run == rhs['run_lumi'].run and
                self['run_lumi'].lumis == rhs['run_lumi'].lumis and self['firstevent'] == rhs['firstevent'] and
                self['lastevent'] == rhs['lastevent'])

    def __hash__(self):
        """
        Hash function for this dict.
        """
        # Generate an immutable sorted string representing this object
        # NOTE: the run object needs to be hashed
        immutableSelf = []
        for keyName in sorted(self):
            if keyName == "run_lumi":
                immutableSelf.append((keyName, hash(self[keyName])))
            else:
                immutableSelf.append((keyName, self[keyName]))
        hashValue = hashlib.sha1(encodeUnicodeToBytes(str(immutableSelf)))
        # keep 15 hex digits so the value fits a 64-bit signed integer
        return int(hashValue.hexdigest()[:15], 16)

    def json(self, thunker=None):
        """
        _json_
        Serialize the object. Only copy select fields and construct one new field.
        """
        jsonDict = {k: self[k] for k in WorkUnit.fieldsToCopy}
        jsonDict["run_lumi"] = {"run_number": self['run_lumi'].run, "lumis": self['run_lumi'].lumis}
        return jsonDict

    def __to_json__(self, thunker=None):
        """
        __to_json__
        This is the standard way we jsonize other objects.
        Included here so we have a uniform method.
        """
        return self.json(thunker)

    def getInfo(self):
        """
        Returns: tuple of parameters for the work unit
        """
        return tuple(self[x] for x in WorkUnit.fieldsForInfo)
from builtins import str, bytes
__all__ = []
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WMObject import WMObject
class File(WMObject, dict):
    """
    _File_
    Data object that contains details for a single file.
    Identity (equality, ordering, hashing) is based solely on the 'lfn' key.
    TODO
    - use the decorator `from functools import total_ordering` after
    dropping support for python 2.6
    - then, drop __ne__, __le__, __gt__, __ge__
    """
    def __init__(self, lfn="", size=0, events=0, checksums=None,
                 parents=None, locations=None, merged=False):
        """
        :param lfn: logical file name, the identity used by __eq__/__hash__
        :param size: file size in bytes
        :param events: number of events in the file
        :param checksums: dict of checksum-name -> value
        :param parents: set of parent File objects (or LFN strings)
        :param locations: set of location names where the file resides
        :param merged: whether this file is the product of a merge step
        """
        dict.__init__(self)
        checksums = checksums or {}
        self.setdefault("lfn", lfn)
        self.setdefault("size", size)
        self.setdefault("events", events)
        self.setdefault("checksums", checksums)
        self.setdefault('runs', set())
        self.setdefault('merged', merged)
        self.setdefault('last_event', 0)
        self.setdefault('first_event', 0)
        if locations is None:
            self.setdefault("locations", set())
        else:
            self.setdefault("locations", locations)
        if parents is None:
            self.setdefault("parents", set())
        else:
            self.setdefault("parents", parents)
    def addRun(self, run):
        """
        _addRun_
        run should be an instance of WMCore.DataStructs.Run
        Add a run container to this file, tweak the run and lumi
        keys to be max run and max lumi for backwards compat.
        """
        if not isinstance(run, Run):
            msg = "addRun argument must be of type WMCore.DataStructs.Run"
            raise RuntimeError(msg)
        addFlag = False
        # if a Run with the same run number exists, merge into it in place
        for runMember in self['runs']:
            if runMember.run == run.run:
                # this rely on Run object overwrite __add__ to update self
                runMember + run
                addFlag = True
        if not addFlag:
            self['runs'].add(run)
        return
    def load(self):
        """
        A DataStructs file has nothing to load from, other implementations will
        over-ride this method.
        """
        # synthesize a test LFN from the id, for testing purposes only
        if self['id']:
            self['lfn'] = '/store/testing/%s' % self['id']
    def save(self):
        """
        A DataStructs file has nothing to save to, other implementations will
        over-ride this method.
        """
        pass
    def setLocation(self, pnn):
        """
        Add one or more locations to this file's location set.
        """
        # Make sure we don't add None, [], "" as file location
        if pnn:
            self['locations'] = self['locations'] | set(self.makelist(pnn))
    def __eq__(self, rhs):
        """
        File is equal if it has the same name
        """
        # also comparable against a plain LFN string
        eq = False
        if isinstance(rhs, type(self)):
            eq = self['lfn'] == rhs['lfn']
        elif isinstance(rhs, (str, bytes)):
            eq = self['lfn'] == rhs
        return eq
    def __ne__(self, rhs):
        return not self.__eq__(rhs)
    def __hash__(self):
        # hash on the LFN only, consistent with __eq__
        thisHash = self['lfn'].__hash__()
        return thisHash
    def __lt__(self, rhs):
        """
        Sort files based on lexicographical ordering of the value connected
        to the 'lfn' key
        """
        eq = False
        if isinstance(rhs, type(self)):
            eq = self['lfn'] < rhs['lfn']
        elif isinstance(rhs, (str, bytes)):
            eq = self['lfn'] < rhs
        return eq
    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)
    def __gt__(self, other):
        return not self.__le__(other)
    def __ge__(self, other):
        return not self.__lt__(other)
    def json(self, thunker=None):
        """
        _json_
        Serialize the file object. This will convert all Sets() to lists and
        weed out the internal data structures that don't need to be shared.
        """
        fileDict = {"last_event": self["last_event"],
                    "first_event": self["first_event"],
                    "lfn": self["lfn"],
                    "locations": list(self["locations"]),
                    "id": self.get("id", None),
                    "checksums": self["checksums"],
                    "events": self["events"],
                    "merged": self["merged"],
                    "size": self["size"],
                    "runs": [],
                    "parents": []}
        for parent in self["parents"]:
            if isinstance(parent, (str, bytes)):
                # Then for some reason, we're passing strings
                # Done specifically for ErrorHandler
                fileDict['parents'].append(parent)
            elif thunker is None:
                # non-string parents are silently dropped without a thunker
                continue
            else:
                fileDict["parents"].append(thunker._thunk(parent))
        for run in self["runs"]:
            runDict = {"run_number": run.run,
                       "lumis": run.lumis}
            fileDict["runs"].append(runDict)
        return fileDict
    def __to_json__(self, thunker=None):
        """
        __to_json__
        This is the standard way we jsonize other objects.
        Included here so we have a uniform method.
        """
        return self.json(thunker)
from builtins import str
from WMCore.DataStructs.WMObject import WMObject
class SummaryHistogram(WMObject):
"""
_SummaryHistogram_
Histogram object, provides familiar CRUD methods
which take care of most of the statistical
calculations when adding points, this object
can also be converted into a dictionary
for JSON documents. It knows how to combine
with other histograms and create itself from
a dictionary provided it has matching structure.
This is an interface, the real work is done
by the ContinuousSummaryHistogram and
DiscreteSummaryHistogram objects
"""
def __init__(self, title = None, xLabel = None):
"""
__init__
Initialize the elements in the object.
"""
# Meta-information about the histogram, it can be changed at any point
self.title = title
self.xLabel = xLabel
# These shouldn't be touched from anything outside the SummaryHistogram object and children classes
self.continuous = None
self.jsonInternal = None
self.data = {}
self.average = None
self.stdDev = None
return
def setTitle(self, newTitle):
"""
_setTitle_
Set the title
"""
self.title = newTitle
return
def setHorizontalLabel(self, xLabel):
"""
_setHorizontalLabel_
Set the label on the x axis
"""
self.xLabel = xLabel
return
def addPoint(self, xValue, yLabel):
"""
_addPoint_
Add a point to the histogram data, a histogram
can have many types of y values for the same x if
x is continuous otherwise it is only one yLabel.
They should be in a similar scale for best results.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def toJSON(self):
"""
_toJSON_
Return a dictionary which is compatible
with a JSON object
"""
if self.continuous is None:
raise TypeError("toJSON can't be called on a bare SummaryHistogram object")
# Get what the children classes did
jsonDict = {}
jsonDict['internalData'] = self.jsonInternal or {}
# Add the common things
jsonDict['title'] = self.title
jsonDict['xLabel'] = self.xLabel
jsonDict['continuous'] = self.continuous
jsonDict['data'] = self.data
jsonDict['stdDev'] = self.stdDev
jsonDict['average'] = self.average
return jsonDict
    def __add__(self, other):
        """
        __add__

        Add two histograms, combine statistics.
        Abstract in the base class; subclasses must implement (or explicitly
        refuse) histogram combination.
        """
        raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
    def __str__(self):
        """
        __str__

        Return the str object of the JSON
        dictionary produced by toJSON().
        """
return str(self.toJSON()) | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/DataStructs/MathStructs/SummaryHistogram.py | 0.844601 | 0.581749 | SummaryHistogram.py | pypi |
from __future__ import division
import math
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import validateNumericInput
from WMCore.Algorithms.MathAlgos import calculateRunningAverageAndQValue, calculateStdDevFromQ
class ContinuousSummaryHistogram(SummaryHistogram):
    """
    _ContinuousSummaryHistogram_

    A histogram where there are continuous points
    with certain frequency, it follows
    that there is only one value in Y and
    that the average and standard deviation are
    not calculated on the frequency values but the X values.
    """

    def __init__(self, title, xLabel, yLabel = None,
                 roundingDecimals = 2, nBins = None,
                 dropOutliers = False, sigmaLimit = 5,
                 storeHistogram = True):
        """
        __init__

        Initialize a more complex histogram structure, containing different
        data to calculate online average and standard deviations. This data is also
        stored in the JSON to allow rebuilding and adding histograms.

        All histograms are binned when requested, the resolution can be specified
        through nBins, otherwise the value used is the one recommended in:
        Wand, M.P. (1997), "Data-Based Choice of Histogram Bin Width," The American Statistician, 51, 59-64.

        If specified, outlier farther than sigmaLimit standard deviations from the
        mean will not be included in the binned histogram.

        :param title: histogram title
        :param xLabel: label for the x axis
        :param yLabel: label for the single y series (may instead be supplied on first addPoint)
        :param roundingDecimals: number of decimals the x values are rounded to
        :param nBins: fixed number of bins; when None it is derived from the number of points
        :param dropOutliers: when True, points beyond sigmaLimit stddevs are excluded from binning
        :param sigmaLimit: number of standard deviations that defines an outlier
        :param storeHistogram: when False only the running statistics are kept, not the raw data
        """
        # Initialize the parent object
        SummaryHistogram.__init__(self, title, xLabel)

        # Indicate this is a continuous histogram
        self.continuous = True

        # Add data only used in the continuous version
        self.yLabel = yLabel
        self.nPoints = 0
        # QValue is the running accumulator maintained by
        # calculateRunningAverageAndQValue and consumed by calculateStdDevFromQ
        # to derive the standard deviation (see WMCore.Algorithms.MathAlgos)
        self.QValue = None
        self.average = None

        # Configuration parameters for the continuous histograms
        self.roundingDecimals = roundingDecimals
        self.fixedNBins = nBins
        self.dropOutliers = dropOutliers
        self.sigmaLimit = sigmaLimit
        self.binned = False
        self.storeHistogram = storeHistogram

        # Override initialization of some attributes
        self.average = 0.0
        self.stdDev = 0.0

        return

    def addPoint(self, xValue, yLabel = None):
        """
        _addPoint_

        Add a point from a continuous set (only-numbers allowed currently) to the histogram data,
        calculate the running average and standard deviation.
        If no y-label had been specified before, one must be supplied
        otherwise the given y-label must be either None or equal
        to the stored value.

        :param xValue: numeric x value; silently ignored when not a valid number
        :param yLabel: label of the single y series
        :raises Exception: when the histogram was already binned or the y-label rules are violated
        """
        if self.binned:
            # Points can't be added to binned histograms!
            raise Exception("Points can't be added to binned histograms")
        if self.yLabel is None and yLabel is None:
            raise Exception("Some y-label must be stored for the histogram")
        elif self.yLabel is None:
            self.yLabel = yLabel
        elif yLabel is not None and self.yLabel != yLabel:
            raise Exception("Only one y-label is allowed on continuous histograms")

        if not validateNumericInput(xValue):
            # Do nothing if it is not a number
            return

        # normalize the value before using it as a frequency-map key
        xValue = float(xValue)
        xValue = round(xValue, self.roundingDecimals)
        if self.storeHistogram:
            if xValue not in self.data:
                self.data[xValue] = 0
            self.data[xValue] += 1

        self.nPoints += 1
        (self.average, self.QValue) = calculateRunningAverageAndQValue(xValue, self.nPoints, self.average, self.QValue)

        return

    def __add__(self, other):
        # TODO: For HG1302, support multiple agents properly in the workload summary
        raise NotImplementedError

    def toJSON(self):
        """
        _toJSON_

        Bin the histogram if any, calculate the standard deviation. Store
        the internal data needed for reconstruction of the histogram
        from JSON and call superclass toJSON method.
        """
        if self.nPoints:
            self.stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
        if not self.binned and self.storeHistogram:
            # binning is destructive: no further points can be added afterwards
            self.binHistogram()
        # everything required to rebuild the running statistics from JSON
        self.jsonInternal = {}
        self.jsonInternal['yLabel'] = self.yLabel
        self.jsonInternal['QValue'] = self.QValue
        self.jsonInternal['nPoints'] = self.nPoints

        return SummaryHistogram.toJSON(self)

    def binHistogram(self):
        """
        _binHistogram_

        Histograms of continuous data must be binned,
        this takes care of that using given or optimal parameters.
        Note that this modifies the data object,
        and points can't be added to the histogram after this.
        """
        if not self.nPoints:
            return

        self.binned = True

        # Number of bins can be specified or calculated based on number of points
        nBins = self.fixedNBins
        if nBins is None:
            # bin-count rule from Wand (1997), see class docstring
            nBins = int(math.floor((5.0 / 3.0) * math.pow(self.nPoints, 1.0 / 3.0)))

        # Define min and max
        if not self.dropOutliers:
            upperLimit = max(self.data.keys())
            lowerLimit = min(self.data.keys())
        else:
            stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
            upperLimit = self.average + (stdDev * self.sigmaLimit)
            lowerLimit = self.average - (stdDev * self.sigmaLimit)

        # Incremental delta
        delta = abs(float(upperLimit - lowerLimit)) / nBins

        # Build the bins, it's a list of tuples for now
        bins = []
        a = lowerLimit
        b = lowerLimit + delta
        while len(bins) < nBins:
            bins.append((a, b))
            a += delta
            b += delta

        # Go through the sorted data and populate the binned histogram; bins
        # are keyed as "lowerEdge,upperEdge" strings so they survive JSON encoding
        binnedHisto = {}
        currentBin = 0
        currentPoint = 0
        sortedData = sorted(self.data.keys())
        while currentPoint < len(sortedData):
            point = sortedData[currentPoint]
            encodedTuple = "%s,%s" % (bins[currentBin][0], bins[currentBin][1])
            if encodedTuple not in binnedHisto:
                binnedHisto[encodedTuple] = 0
            if point > upperLimit or point < lowerLimit:
                # outlier (only possible when dropOutliers is True): skip it
                currentPoint += 1
            elif currentBin == len(bins) - 1:
                # last bin absorbs everything up to the upper limit
                binnedHisto[encodedTuple] += self.data[point]
                currentPoint += 1
            elif point >= bins[currentBin][0] and point < bins[currentBin][1]:
                binnedHisto[encodedTuple] += self.data[point]
                currentPoint += 1
            else:
                # point belongs to a later bin, advance the bin cursor
                currentBin += 1

        self.data = binnedHisto
return | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/DataStructs/MathStructs/ContinuousSummaryHistogram.py | 0.753058 | 0.633354 | ContinuousSummaryHistogram.py | pypi |
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import getAverageStdDev
class DiscreteSummaryHistogram(SummaryHistogram):
    """
    _DiscreteSummaryHistogram_

    A histogram where the data is organized by
    a finite number of categories, it can have
    many values for each category.
    """

    def __init__(self, title, xLabel):
        """
        __init__

        Initialize a simpler histogram that only stores
        the histogram. Everything else is calculated when the JSON is requested.

        :param title: histogram title
        :param xLabel: label for the x axis
        """
        # Initialize the parent object
        SummaryHistogram.__init__(self, title, xLabel)

        # Indicate this is a discrete histogram
        self.continuous = False

        # Add data only used in the discrete version
        self.yLabels = set()

        # Override initialization of some attributes:
        # averages and standard deviations are kept per y-label here
        self.average = {}
        self.stdDev = {}

        return

    def addPoint(self, xValue, yLabel):
        """
        _addPoint_

        Add point to discrete histogram,
        x value is a category and therefore not rounded.
        There can be many yLabel and standard deviations are
        not calculated online. Histograms are always stored.

        :param xValue: the category (used as a dictionary key)
        :param yLabel: the y series the point belongs to
        """
        if xValue not in self.data:
            # Record the category, initializing all known y series to 0
            self.data[xValue] = {}
            for label in self.yLabels:
                self.data[xValue][label] = 0
        if yLabel not in self.yLabels:
            # Record the label and back-fill it into existing categories
            self.yLabels.add(yLabel)
            self.average[yLabel] = 0.0
            self.stdDev[yLabel] = 0.0
            for category in self.data:
                self.data[category][yLabel] = 0

        self.data[xValue][yLabel] += 1

        return

    def __add__(self, other):
        # TODO: For HG1302, support multiple agents properly in the workload summary
        raise NotImplementedError

    def toJSON(self):
        """
        _toJSON_

        Calculate average and standard deviation, store it
        and call the parent class toJSON method
        """
        for yLabel in self.yLabels:
            numList = []
            for xValue in self.data:
                numList.append(self.data[xValue][yLabel])
            # per-label statistics computed over all categories at once
            (self.average[yLabel], self.stdDev[yLabel]) = getAverageStdDev(numList)
return SummaryHistogram.toJSON(self) | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/src/python/WMCore/DataStructs/MathStructs/DiscreteSummaryHistogram.py | 0.702122 | 0.544801 | DiscreteSummaryHistogram.py | pypi |
import logging
import sys
from collections import Counter
from WMCore.Services.DBS.DBS3Reader import DBS3Reader
from WMCore.Services.Rucio.Rucio import Rucio
RUCIO_ACCT = "wma_prod"
RUCIO_HOST = "http://cms-rucio.cern.ch"
RUCIO_AUTH = "https://cms-rucio-auth.cern.ch"
DBS_URL = "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
def loggerSetup(logLevel=logging.INFO):
    """
    Return a logger which writes everything to stdout.

    :param logLevel: logging level applied to both the logger and its handler
    :return: a configured logging.Logger instance
    """
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler(sys.stdout)
    fmt = logging.Formatter("%(asctime)s:%(levelname)s:%(module)s: %(message)s")
    handler.setFormatter(fmt)
    handler.setLevel(logLevel)
    logger.addHandler(handler)
    logger.setLevel(logLevel)
    return logger
def getFromRucio(dataset, logger):
    """
    Using the WMCore Rucio object, fetch all the blocks and files
    for a given container.

    :param dataset: the container (dataset) name to look up
    :param logger: logger object handed to the Rucio client
    :return: a dictionary key'ed by the block name, value is the amount of files
    """
    rucio = Rucio(acct=RUCIO_ACCT,
                  hostUrl=RUCIO_HOST,
                  authUrl=RUCIO_AUTH,
                  configDict={'logger': logger})

    result = dict()
    for block in rucio.getBlocksInContainer(dataset):
        # the block DID metadata carries the file count under 'length'
        data = rucio.getDID(block)
        result.setdefault(block, data['length'])
    return result
def getFromDBS(dataset, logger):
    """
    Uses the WMCore DBS3Reader object to fetch all the blocks and files
    for a given container.

    :param dataset: the dataset name to look up
    :param logger: logger object handed to the DBS reader
    :return: a dictionary key'ed by the block name, and an inner dictionary
        with the number of valid and invalid files. It also returns a total counter
        for the number of valid and invalid files in the dataset.
    """
    dbsReader = DBS3Reader(DBS_URL, logger)

    result = dict()
    dbsFilesCounter = Counter({'valid': 0, 'invalid': 0})
    blocks = dbsReader.listFileBlocks(dataset)
    for block in blocks:
        # validFileOnly=0 retrieves both valid and invalid files
        data = dbsReader.dbs.listFileArray(block_name=block, validFileOnly=0, detail=True)
        result.setdefault(block, Counter({'valid': 0, 'invalid': 0}))
        for fileInfo in data:
            if fileInfo['is_file_valid'] == 1:
                result[block]['valid'] += 1
                dbsFilesCounter['valid'] += 1
            else:
                result[block]['invalid'] += 1
                dbsFilesCounter['invalid'] += 1
    return result, dbsFilesCounter
def main():
    """
    Expects a dataset name as input argument.
    It then queries Rucio and DBS and compare their blocks and
    number of files, logging any mismatch found.
    """
    if len(sys.argv) != 2:
        print("A dataset name must be provided in the command line")
        sys.exit(1)
    datasetName = sys.argv[1]

    logger = loggerSetup(logging.INFO)

    rucioOutput = getFromRucio(datasetName, logger)
    dbsOutput, dbsFilesCounter = getFromDBS(datasetName, logger)

    # high level comparison of the file counts seen by each service
    logger.info("*** Dataset: %s", datasetName)
    logger.info("Rucio file count : %s", sum(rucioOutput.values()))
    logger.info("DBS file count : %s", dbsFilesCounter['valid'] + dbsFilesCounter['invalid'])
    logger.info(" - valid files : %s", dbsFilesCounter['valid'])
    logger.info(" - invalid files : %s", dbsFilesCounter['invalid'])

    # block level comparison between the two services
    logger.info("Blocks in Rucio but not in DBS: %s", set(rucioOutput.keys()) - set(dbsOutput.keys()))
    logger.info("Blocks in DBS but not in Rucio: %s", set(dbsOutput.keys()) - set(rucioOutput.keys()))
    for blockname in rucioOutput:
        if blockname not in dbsOutput:
            logger.error("This block does not exist in DBS: %s", blockname)
            continue
        if rucioOutput[blockname] != sum(dbsOutput[blockname].values()):
            logger.warning("Block with file mismatch: %s", blockname)
            logger.warning("\tRucio: %s\t\tDBS: %s", rucioOutput[blockname], sum(dbsOutput[blockname].values()))
if __name__ == "__main__":
sys.exit(main()) | /reqmgr2ms-pileup-2.2.4rc1.tar.gz/reqmgr2ms-pileup-2.2.4rc1/bin/adhoc-scripts/checkDsetFileCount.py | 0.413477 | 0.306037 | checkDsetFileCount.py | pypi |
from textwrap import TextWrapper
from collections import OrderedDict
def twClosure(replace_whitespace=False,
              break_long_words=False,
              maxWidth=120,
              maxLength=-1,
              maxDepth=-1,
              initial_indent=''):
    """
    Deals with indentation of dictionaries with very long key, value pairs.

    :param replace_whitespace: Replace each whitespace character with a single space.
    :param break_long_words: If True words longer than width will be broken.
    :param maxWidth: The maximum length of wrapped lines.
    :param maxLength: maximum number of entries rendered per container (-1 means unlimited)
    :param maxDepth: maximum nesting depth rendered (-1 means unlimited)
    :param initial_indent: String that will be prepended to the first line of the output
    :return: the twEnclosed inner function, ready to format an object

    Wraps key strings to maxWidth chars.
    Uses 4 spaces indentation for both keys and values.
    Nested dictionaries and lists go to next line.
    """
    twr = TextWrapper(replace_whitespace=replace_whitespace,
                      break_long_words=break_long_words,
                      width=maxWidth,
                      initial_indent=initial_indent)

    def twEnclosed(obj, ind='', depthReached=0, reCall=False):
        """
        The inner function of the closure

        :param obj: object to render (dict, list, set, or anything printable)
        :param ind: Initial indentation for the single output string
        :param depthReached: current nesting depth (internal bookkeeping)
        :param reCall: Flag to indicate a recursive call (should not be used outside)
        :return: the formatted, newline-terminated string
        """
        output = ''
        if isinstance(obj, dict):
            # render keys in deterministic (sorted) order
            obj = OrderedDict(sorted(list(obj.items()),
                                     key=lambda t: t[0],
                                     reverse=False))
            if reCall:
                # nested dict: start on a new line with one extra indent level
                output += '\n'
                ind += '    '
                depthReached += 1
            lengthReached = 0
            for key, value in list(obj.items()):
                lengthReached += 1
                if lengthReached > maxLength and maxLength >= 0:
                    # too many entries: elide the remainder
                    output += "%s...\n" % ind
                    break
                if depthReached <= maxDepth or maxDepth < 0:
                    output += "%s%s: %s" % (ind,
                                            ''.join(twr.wrap(key)),
                                            twEnclosed(value, ind, depthReached=depthReached, reCall=True))
        elif isinstance(obj, (list, set)):
            # NOTE(review): unlike the dict branch, depthReached is not
            # incremented for lists/sets -- confirm whether that is intended
            if reCall:
                output += '\n'
                ind += '    '
            lengthReached = 0
            for value in obj:
                lengthReached += 1
                if lengthReached > maxLength and maxLength >= 0:
                    output += "%s...\n" % ind
                    break
                if depthReached <= maxDepth or maxDepth < 0:
                    output += "%s%s" % (ind, twEnclosed(value, ind, depthReached=depthReached, reCall=True))
        else:
            # scalar leaf: plain str(), one per line (values are not wrapped)
            output += "%s\n" % str(obj)  # join(twr.wrap(str(obj)))
        return output
    return twEnclosed
def twPrint(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
    """
    A simple caller of twClosure (see docstring for twClosure):
    format obj and print the result to stdout.
    """
    formatter = twClosure(maxWidth=maxWidth,
                          maxLength=maxLength,
                          maxDepth=maxDepth)
    print(formatter(obj))
def twFormat(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
    """
    A simple caller of twClosure (see docstring for twClosure):
    return the formatted string for obj.
    """
    return twClosure(maxWidth=maxWidth,
                     maxLength=maxLength,
                     maxDepth=maxDepth)(obj)
import io
import os
import stat
import subprocess
import time
import zlib
from Utils.Utilities import decodeBytesToUnicode
def calculateChecksums(filename):
    """
    _calculateChecksums_

    Get the adler32 and crc32 checksums of a file. Return None on error

    Process line by line and adjust for known signed vs. unsigned issues
    http://docs.python.org/library/zlib.html

    The cksum UNIX command line tool implements a CRC32 checksum that is
    different than any of the python algorithms, therefore open cksum
    in a subprocess and feed it the same chunks of data that are used
    to calculate the adler32 checksum.

    :param filename: path of the file to checksum
    :return: tuple of (zero-padded 8-char hex adler32, cksum CRC string)
    :raises RuntimeError: when the cksum output fails the consistency check
    """
    adler32Checksum = 1  # adler32 of an empty string
    cksumProcess = subprocess.Popen("cksum", stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    # the lambda basically creates an iterator function with zero
    # arguments that steps through the file in 4096 byte chunks
    with open(filename, 'rb') as f:
        for chunk in iter((lambda: f.read(4096)), b''):
            adler32Checksum = zlib.adler32(chunk, adler32Checksum)
            cksumProcess.stdin.write(chunk)

    # closing stdin signals EOF so cksum can emit its result
    cksumProcess.stdin.close()
    cksumProcess.wait()

    cksumStdout = cksumProcess.stdout.read().split()
    cksumProcess.stdout.close()

    # consistency check on the cksum output: the second field must be the
    # number of bytes processed, which has to match the file size on disk
    filesize = os.stat(filename)[stat.ST_SIZE]
    if len(cksumStdout) != 2 or int(cksumStdout[1]) != filesize:
        raise RuntimeError("Something went wrong with the cksum calculation !")
    cksumStdout[0] = decodeBytesToUnicode(cksumStdout[0])

    # mask to an unsigned 32 bit value before hex formatting
    return (format(adler32Checksum & 0xffffffff, '08x'), cksumStdout[0])
def tail(filename, nLines=20):
    """
    _tail_

    A version of tail: return the last nLines lines of a file as one string.

    Adapted from code on http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail

    :param filename: path of the file to read
    :param nLines: number of trailing lines to return, must be >= 0
    :return: the concatenated trailing lines (fewer if the file is shorter)
    """
    assert nLines >= 0
    if nLines == 0:
        # bug fix: lines[-0:] slices the WHOLE list, so tail(f, 0) used to
        # return the entire file instead of nothing
        return ""
    pos, lines = nLines + 1, []
    # make sure only valid utf8 encoded chars will be passed along
    with io.open(filename, 'r', encoding='utf8', errors='ignore') as f:
        while len(lines) <= nLines:
            try:
                # NOTE: text-mode files in python3 reject non-zero seeks from
                # the end (io.UnsupportedOperation is an OSError subclass and
                # thus caught below), so this usually reads the whole file
                f.seek(-pos, 2)
            except IOError:
                f.seek(0)
                break
            finally:
                lines = list(f)
            pos *= 2
    return "".join(lines[-nLines:])
def getFileInfo(filename):
    """
    _getFileInfo_

    Return file info in a friendly format: name, size and the last
    modification/access timestamps rendered as local-time strings.
    """
    statInfo = os.stat(filename)
    timeFmt = "%m/%d/%Y %I:%M:%S %p"
    return {'Name': filename,
            'Size': statInfo[stat.ST_SIZE],
            'LastModification': time.strftime(timeFmt, time.localtime(statInfo[stat.ST_MTIME])),
            'LastAccess': time.strftime(timeFmt, time.localtime(statInfo[stat.ST_ATIME]))}
def findMagicStr(filename, matchString):
    """
    _findMagicStr_

    Generator yielding every line of the given file that contains
    the pattern string matchString.
    """
    with io.open(filename, 'r', encoding='utf8', errors='ignore') as logfile:
        # TODO: can we avoid reading the whole file
        yield from (line for line in logfile if matchString in line)
def getFullPath(name, envPath="PATH"):
    """
    Find the first occurrence of a file within the directories listed in a
    path-like environment variable.

    :param name: file name
    :param envPath: any environment variable specified for path (PATH, PYTHONPATH, etc)
    :return: full path if it is found under the envPath directories, None otherwise
    """
    searchPath = os.getenv(envPath)
    # bug fix: an unset environment variable used to raise AttributeError
    # on the split() call below; treat it as "not found" instead
    if searchPath is None:
        return None
    for path in searchPath.split(os.path.pathsep):
        fullPath = os.path.join(path, name)
        if os.path.exists(fullPath):
            return fullPath
    return None
from builtins import str, bytes
def portForward(port):
    """
    Decorator wrapper function for port forwarding of the REST calls of any
    function to a given port.

    Constraints for applying this decorator:
    1. The decorated function must be defined within a class and not be a
       static method, so its first positional argument is the class instance.
    2. The url argument must be the second positional argument of the
       decorated function (right after the class instance argument).
    3. The url must follow the syntax specifications in RFC 1808:
       https://tools.ietf.org/html/rfc1808.html

    Only urls starting with 'https://cmsweb' are mangled: the given port is
    injected right after the first '.cern.ch' host part.

    :param port: The port to which the REST call should be forwarded.
    """
    def portForwardDecorator(callFunc):
        """
        The actual decorator
        """
        def portMangle(callObj, url, *args, **kwargs):
            """
            Forward the call, substituting the port in the url when it matches
            the cmsweb prefix. Any error during mangling falls back to the
            original, untouched url.

            :param callObj: the class instance (self) of the decorated method,
                            passed through unchanged
            :param url: the url to be (eventually) forwarded
            """
            newUrl = None
            try:
                if isinstance(url, str) and url.startswith('https://cmsweb'):
                    newUrl = url.replace('.cern.ch/', '.cern.ch:%d/' % port, 1)
                elif isinstance(url, bytes) and url.startswith(b'https://cmsweb'):
                    newUrl = url.replace(b'.cern.ch/', b'.cern.ch:%d/' % port, 1)
            except Exception:
                # best effort: on any failure keep the original url
                newUrl = None

            if newUrl is None:
                return callFunc(callObj, url, *args, **kwargs)
            return callFunc(callObj, newUrl, *args, **kwargs)
        return portMangle
    return portForwardDecorator
class PortForward():
    """
    A class with a call method implementing a simple way to use the
    functionality provided by the portForward decorator as a pure
    functional call:

    EXAMPLE:
        from Utils.PortForward import PortForward

        portForwarder = PortForward(8443)
        url = portForwarder('https://cmsweb-testbed.cern.ch/couchdb')
    """
    def __init__(self, port):
        """
        Remember the port to which matching urls should be forwarded.
        """
        self.port = port

    def __call__(self, url):
        """
        Mangle the given url through the portForward decorator machinery
        and return the (possibly) port-forwarded url.
        """
        def passThrough(_instance, mangledUrl):
            # the decorator hands the mangled url straight back to us
            return mangledUrl
        return portForward(self.port)(passThrough)(self, url)
from builtins import object
from functools import reduce
class Functor(object):
    """
    A small callable wrapper binding a function together with a fixed set of
    positional and keyword arguments.

    Calling the instance (or its run method) with an object obj evaluates:

        func(obj, *args, **kwargs)

    i.e. the object is always passed as the first argument, followed by the
    arguments captured at construction time. Any extra arguments the wrapped
    function needs must be supplied in the constructor; none can be passed
    at call time.

    :Example:
        >>> def adder(a, b):
        ...     return a + b
        >>> x = Functor(adder, 8)
        >>> x(2)
        10
    """
    def __init__(self, func, *args, **kwargs):
        """
        Capture the function and the arguments to be bound to it.
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __call__(self, obj):
        """
        Make the instance directly callable; delegates to run().
        """
        return self.run(obj)

    def run(self, obj):
        """
        Invoke the wrapped function with obj prepended to the bound arguments.
        """
        return self.func(obj, *self.args, **self.kwargs)
class Pipeline(object):
    """
    A simple Functional Pipeline Class: applies a sequence of callables to an
    object, feeding the output of each step into the next one.
    """
    def __init__(self, funcLine=None, name=None):
        """
        :param funcLine: list of callables (plain functions or Functor
            instances - see the Functor class above) applied sequentially to
            the object passed to run(). A Functor carries its extra
            arguments, so a pipeline entry Functor(func, 'foo', bar=True)
            results in the call func(obj, 'foo', bar=True) when the pipeline
            executes. A plain function is called directly with the object as
            its only argument.
        :param name: optional human readable name for this pipeline
        """
        self.funcLine = funcLine or []
        self.name = name

    def getPipelineName(self):
        """
        Return the pipeline name, or a default placeholder when unnamed.
        """
        return self.name or "Unnamed Pipeline"

    def run(self, obj):
        """
        Execute every step of the pipeline on obj and return the final result.
        """
        result = obj
        for step in self.funcLine:
            result = step(result)
        return result
# system modules
import os
import ssl
import time
import logging
import traceback
# third part library
try:
import jwt
except ImportError:
traceback.print_exc()
jwt = None
from Utils.Utilities import encodeUnicodeToBytes
# prevent "SSL: CERTIFICATE_VERIFY_FAILED" error
# this will cause pylint warning W0212, therefore we ignore it above
ssl._create_default_https_context = ssl._create_unverified_context
def readToken(name=None):
    """
    Read an IAM token either from a file or from an environment variable.

    :param name: either a file name containing the token or the name of an
        environment variable holding the token value. When not provided the
        token is read from the IAM_TOKEN environment variable.
    :return: the token string or None
    """
    if name and os.path.exists(name):
        with open(name, 'r', encoding='utf-8') as istream:
            return istream.read()
    envName = name if name else "IAM_TOKEN"
    return os.environ.get(envName)
def tokenData(token, url="https://cms-auth.web.cern.ch/jwk", audUrl="https://wlcg.cern.ch/jwt/v1/any"):
    """
    Inspect and extract token data (the decoded JWT claims).

    :param token: token string
    :param url: IAM provider URL serving the JSON Web Key set
    :param audUrl: audience string the token must carry
    :return: dictionary of decoded claims; empty dict when no token is given
        or the jwt library is unavailable
    """
    if not token or not jwt:
        # jwt may be None when the PyJWT import failed at module load time
        return {}
    if isinstance(token, str):
        token = encodeUnicodeToBytes(token)
    # fetch the signing key matching the token's key id from the provider
    jwksClient = jwt.PyJWKClient(url)
    signingKey = jwksClient.get_signing_key_from_jwt(token)
    key = signingKey.key
    headers = jwt.get_unverified_header(token)
    alg = headers.get('alg', 'RS256')
    # decode while verifying signature, audience and expiration
    data = jwt.decode(
        token,
        key,
        algorithms=[alg],
        audience=audUrl,
        options={"verify_exp": True},
    )
    return data
def isValidToken(token):
    """
    Check whether the given IAM token is still valid (not expired).

    :param token: token string
    :return: True when the token carries a non-expired 'exp' claim,
        False otherwise
    """
    expiration = tokenData(token).get('exp', 0)  # expire, seconds since epoch
    return bool(expiration) and expiration >= time.time()
class TokenManager():
    """
    TokenManager class handles IAM tokens: reads them from file or
    environment and caches the token together with its expiration timestamp.
    """
    def __init__(self,
                 name=None,
                 url="https://cms-auth.web.cern.ch/jwk",
                 audUrl="https://wlcg.cern.ch/jwt/v1/any",
                 logger=None):
        """
        Token manager reads IAM tokens either from file or env.
        It caches token along with expiration timestamp.
        By default the env variable to use is IAM_TOKEN.

        :param name: string representing either file or env where we should read token from
        :param url: IAM provider URL
        :param audUrl: audience string
        :param logger: logger object or none to use default one
        """
        self.name = name
        self.url = url
        self.audUrl = audUrl
        # expiration timestamp (seconds since epoch) of the cached token
        self.expire = 0
        self.token = None
        self.logger = logger if logger else logging.getLogger()
        try:
            # eagerly fetch a token so failures surface at construction time
            self.token = self.getToken()
        except Exception as exc:
            self.logger.exception("Failed to get token. Details: %s", str(exc))

    def getToken(self):
        """
        Return valid token and sets its expire timestamp
        """
        if not self.token or not isValidToken(self.token):
            # cached token missing or expired: re-read it from the source
            self.token = readToken(self.name)
        tokenDict = {}
        try:
            tokenDict = tokenData(self.token, url=self.url, audUrl=self.audUrl)
            self.logger.debug(tokenDict)
        except Exception as exc:
            self.logger.exception(str(exc))
            raise
        self.expire = tokenDict.get('exp', 0)
        return self.token

    def getLifetime(self):
        """
        Return remaining lifetime (in seconds) of the existing token
        """
return self.expire - int(time.time()) | /reqmgr2ms-rulecleaner-2.2.4rc3.tar.gz/reqmgr2ms-rulecleaner-2.2.4rc3/src/python/Utils/TokenManager.py | 0.66061 | 0.165863 | TokenManager.py | pypi |
from copy import copy
from builtins import object
from time import time
class MemoryCacheException(Exception):
    """Exception raised for expired or misused MemoryCache objects."""

    def __init__(self, message):
        super(MemoryCacheException, self).__init__(message)
class MemoryCache():
    """
    In-memory cache with an expiration time. The cached payload may be a
    dict, set, list or any other object, although some operations (reset,
    item addition, key lookup) only support the container types.
    """
    __slots__ = ["lastUpdate", "expiration", "_cache"]

    def __init__(self, expiration, initialData=None):
        """
        Initializes cache object

        :param expiration: expiration time in seconds
        :param initialData: initial value for the cache
        """
        self.lastUpdate = int(time())
        self.expiration = expiration
        self._cache = initialData

    def __contains__(self, item):
        """
        Check whether item is in the current cache

        :param item: a simple object (string, integer, etc)
        :return: True if the object can be found in the cache, False otherwise
        """
        return item in self._cache

    def __getitem__(self, keyName):
        """
        If the cache is a dictionary, return a copy of that item from the
        cache. Else, raise an exception.

        :param keyName: the key name from the dictionary
        """
        if isinstance(self._cache, dict):
            # return a (shallow) copy so callers cannot mutate the cached value
            return copy(self._cache.get(keyName))
        else:
            raise MemoryCacheException("Cannot retrieve an item from a non-dict MemoryCache object: {}".format(self._cache))

    def reset(self):
        """
        Resets the cache to its current data type (empties the container).

        :raises MemoryCacheException: when the data type cannot be cleared generically
        """
        if isinstance(self._cache, (dict, set)):
            self._cache.clear()
        elif isinstance(self._cache, list):
            del self._cache[:]
        else:
            raise MemoryCacheException("The cache needs to be reset manually, data type unknown")

    def isCacheExpired(self):
        """
        Evaluate whether the cache has already expired, returning
        True if it did, otherwise it returns False
        """
        return self.lastUpdate + self.expiration < int(time())

    def getCache(self):
        """
        Raises an exception if the cache has expired, otherwise returns
        its data
        """
        if self.isCacheExpired():
            expiredSince = int(time()) - (self.lastUpdate + self.expiration)
            raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
        return self._cache

    def setCache(self, inputData):
        """
        Refresh the cache with the content provided (refresh its expiration as well)
        This method enforces the user to not change the cache data type

        :param inputData: data to store in the cache
        """
        if not isinstance(self._cache, type(inputData)):
            raise TypeError("Current cache data type: %s, while new value is: %s" %
                            (type(self._cache), type(inputData)))
        self.reset()
        self.lastUpdate = int(time())
        self._cache = inputData

    def addItemToCache(self, inputItem):
        """
        Adds new item(s) to the cache, without resetting its expiration.
        It, of course, only works for data caches of type: list, set or dict.

        :param inputItem: additional item to be added to the current cached data
        """
        if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
            # extend another list or set into a set
            self._cache.update(inputItem)
        elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a set
            self._cache.add(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
            # extend another list or set into a list
            self._cache.extend(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a list
            self._cache.append(inputItem)
        elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
            self._cache.update(inputItem)
        else:
            # bug fix: the format arguments used to be swapped, reporting the
            # cache type under the "Input item type" label and vice versa
            msg = "Input item type: %s cannot be added to a cache type: %s" % (type(inputItem), type(self._cache))
            raise TypeError("Cache and input item data type mismatch. %s" % msg)
from builtins import object
import logging
import time
import calendar
from datetime import tzinfo, timedelta
def gmtimeSeconds():
    """
    Return the current GMT time converted to whole seconds since the epoch.
    """
    gmtNow = time.gmtime()
    return int(time.mktime(gmtNow))
def encodeTimestamp(secs):
    """
    Encode seconds since epoch into a GMT timezone string representation.

    :param secs: input timestamp value (either int or float) in seconds since epoch
    :return: time string formatted as %Y-%m-%dT%H:%M:%SZ in GMT
    :raises Exception: when the input is neither an int nor a float
    """
    if isinstance(secs, (int, float)):
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(int(secs)))
    raise Exception("Wrong input, should be seconds since epoch either int or float value")
def decodeTimestamp(timeString):
    """
    Decode a GMT timezone string (see encodeTimestamp) back into seconds
    since epoch.

    :param timeString: timestamp string representation in GMT timezone
    :return: seconds since epoch in GMT timezone
    :raises Exception: when the input is not a string
    """
    if isinstance(timeString, str):
        parsed = time.strptime(timeString, "%Y-%m-%dT%H:%M:%SZ")
        return calendar.timegm(parsed)
    raise Exception("Wrong input, should be time string in GMT timezone representation")
def timeFunction(func):
    """
    Decorator to measure how long a method/function takes to run.

    source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods

    The wrapped callable returns a tuple with:
     * wall clock time spent (seconds, rounded to 4 decimal places)
     * returned result of the function
     * the function name
    """
    from functools import wraps

    @wraps(func)  # preserve the decorated function's name/docstring
    def wrapper(*arg, **kw):
        t1 = time.time()
        res = func(*arg, **kw)
        t2 = time.time()
        return round((t2 - t1), 4), res, func.__name__
    return wrapper
class CodeTimer(object):
    """
    A context manager for timing function calls.
    Adapted from https://www.blog.pythonlibrary.org/2016/05/24/python-101-an-intro-to-benchmarking-your-code/
    Use like
    with CodeTimer(label='Doing something'):
        do_something()
    """

    def __init__(self, label='The function', logger=None):
        # start is also set here for backward compatibility with code
        # reading it before __enter__ runs
        self.start = time.time()
        self.label = label
        self.logger = logger or logging.getLogger()

    def __enter__(self):
        # reset the clock on entry: the object may have been created
        # well before the "with" block actually starts executing
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        runtime = round((end - self.start), 3)
        self.logger.info(f"{self.label} took {runtime} seconds to complete")
class LocalTimezone(tzinfo):
    """
    A tzinfo implementation describing the host's local timezone,
    originally required on python 2 for formatting rfc3339 timestamps
    when sending alerts to the MONIT AlertManager.

    Details on tzinfo subclassing:
    https://docs.python.org/2/library/datetime.html#tzinfo-objects
    """

    def __init__(self):
        super(LocalTimezone, self).__init__()
        self.ZERO = timedelta(0)
        self.STDOFFSET = timedelta(seconds=-time.timezone)
        # altzone is only meaningful when the platform defines a DST zone
        self.DSTOFFSET = timedelta(seconds=-time.altzone) if time.daylight else self.STDOFFSET
        self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET

    def utcoffset(self, dt):
        # offset from UTC, DST-aware
        return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET

    def dst(self, dt):
        # DST adjustment currently in effect
        return self.DSTDIFF if self._isdst(dt) else self.ZERO

    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip through mktime/localtime so the platform decides
        # whether DST applies at this wall-clock instant
        timeTuple = (dt.year, dt.month, dt.day,
                     dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, 0)
        localTuple = time.localtime(time.mktime(timeTuple))
        return localTuple.tm_isdst > 0
import copy
import unittest
class ExtendedUnitTestCase(unittest.TestCase):
    """
    TestCase subclass adding order-insensitive deep comparison helpers.
    """

    def assertContentsEqual(self, expected_obj, actual_obj, msg=None):
        """
        Assert that two nested objects contain the same elements, and their
        sub-elements are the same, without regard for the ordering of any
        sequences they contain at any nesting level.
        """

        def sortKey(element):
            # dicts are ordered by their list of keys, anything else by value
            if isinstance(element, dict):
                return list(element.keys())
            return element

        def normalizeDict(mapping):
            # recursively sort every sequence found below this mapping
            for entry in list(mapping.values()):
                if isinstance(entry, dict):
                    normalizeDict(entry)
                elif isinstance(entry, list):
                    normalizeList(entry)

        def normalizeList(sequence):
            # normalize children first, then sort this level in place
            for entry in sequence:
                if isinstance(entry, dict):
                    normalizeDict(entry)
                elif isinstance(entry, list):
                    normalizeList(entry)
            sequence.sort(key=sortKey)

        if not isinstance(expected_obj, type(actual_obj)):
            self.fail(msg="The two objects are different type and cannot be compared: %s and %s" % (
                type(expected_obj), type(actual_obj)))
        expected = copy.deepcopy(expected_obj)
        actual = copy.deepcopy(actual_obj)
        if isinstance(expected, dict):
            normalizeDict(expected)
            normalizeDict(actual)
        elif isinstance(expected, list):
            normalizeList(expected)
            normalizeList(actual)
        else:
            self.fail(msg="The two objects are different type (%s) and cannot be compared." % type(expected_obj))
        return self.assertEqual(expected, actual)
from builtins import str, bytes
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
    """
    Return a copy of the given headers dict where every CMS-specific
    header key (those starting with "Cms-" or "CMS-") is lower-cased.
    The WMCore Authentication code only checks cms headers in lower
    case, e.g. cms-xxx-yyy; all other keys are kept untouched.
    """
    return {(hkey.lower() if hkey.startswith(('Cms-', 'CMS-')) else hkey): hval
            for hkey, hval in headers.items()}
def makeList(stringList):
    """
    _makeList_

    Build a python list out of a comma separated string of values;
    a list input is returned untouched. Raises ValueError when the
    input is neither a string nor a list.
    """
    if isinstance(stringList, list):
        return stringList
    if not isinstance(stringList, str):
        raise ValueError("Can't convert to list %s" % stringList)
    stripped = stringList.lstrip(' [').rstrip(' ]')
    tokens = stripped.split(',')
    if tokens == ['']:
        return []
    return [str(token.strip(' \'"')) for token in tokens]
def makeNonEmptyList(stringList):
    """
    _makeNonEmptyList_

    Same as makeList, but the resulting list of strings must not be
    empty. Raises ValueError for an empty result or for input that is
    neither a string nor a python list.
    """
    resultList = makeList(stringList)
    if resultList:
        return resultList
    raise ValueError("Input data cannot be an empty list %s" % stringList)
def strToBool(string):
    """
    Convert the usual spellings of True/False (including actual boolean
    objects) to a boolean value.

    In short:
     * True gets mapped from: True, "True", "true", "TRUE".
     * False gets mapped from: False, "False", "false", "FALSE"
     * anything else will fail

    :param string: expects a boolean or a string, but it could be anything else
    :return: a boolean value, or raise ValueError if the value is not supported
    """
    if isinstance(string, bool):
        return string
    if string in ("True", "true", "TRUE"):
        return True
    if string in ("False", "false", "FALSE"):
        return False
    raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
    """
    _safeStr_

    Cast simple data (int, float, string, None, ...) to its string
    representation; containers are rejected with a ValueError.
    """
    if isinstance(string, (tuple, list, set, dict)):
        raise ValueError("We're not supposed to convert %s to string." % string)
    return str(string)
def diskUse():
    """
    Parse `df -klP` output and return the % use of each disk partition.

    :return: list of dicts with keys 'mounted' (mount point) and
             'percent' (usage figure as reported by df)
    """
    usage = []
    dfProc = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
    stdout = decodeBytesToUnicode(dfProc.communicate()[0])
    for line in stdout.split("\n"):
        fields = line.split()
        # skip blank lines and the header line starting with 'Filesystem'
        if fields and fields[0] != 'Filesystem':
            usage.append({'mounted': fields[5], 'percent': fields[4]})
    return usage
def numberCouchProcess():
    """
    Return the number of couch processes by counting occurrences of
    'couchjs' in the output of `ps -ef`.
    """
    psProc = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
    stdout = psProc.communicate()[0]
    return decodeBytesToUnicode(stdout).count('couchjs')
def rootUrlJoin(base, extend):
    """
    Add a path element to the path portion of a ROOT url.

    :param base: url of the form root://host/path
    :param extend: path component to append
    :return: the extended url, or None when base is empty or does not
             match the root:// scheme
    """
    if not base:
        return None
    match = re.match("^root://([^/]+)/(.+)", base)
    if not match:
        return None
    host, path = match.group(1), match.group(2)
    return "root://%s/%s" % (host, os.path.join(path, extend))
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
    """
    _zipEncodeStr_
    Utility to zip a string and encode it.
    If zipped encoded length is greater than maxLen,
    truncate message until zip/encoded version
    is within the limits allowed.

    :param message: input message (str or bytes) to compress and encode
    :param maxLen: maximum allowed length of the zipped+encoded result;
        -1 disables the limit
    :param compressLevel: zlib compression level (0-9)
    :param steps: number of bytes stripped per truncation iteration
    :param truncateIndicator: marker appended to a truncated message
    :return: base64-encoded, zlib-compressed bytes string
    """
    message = encodeUnicodeToBytes(message)
    encodedStr = zlib.compress(message, compressLevel)
    encodedStr = base64.b64encode(encodedStr)
    # fast path: already within the limit (or no limit requested)
    if len(encodedStr) < maxLen or maxLen == -1:
        return encodedStr
    # ratio of compressed+encoded size to plain encoded size, used to
    # estimate how long the raw message may be to fit within maxLen
    compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
    # Estimate new length for message zip/encoded version
    # to be less than maxLen.
    # Also, append truncate indicator to message.
    truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
    strLen = int((maxLen - len(truncateIndicator)) / compressRate)
    message = message[:strLen] + truncateIndicator
    # recurse with the limit disabled just to compute the new encoding
    encodedStr = zipEncodeStr(message, maxLen=-1)
    # If new length is not short enough, truncate
    # recursively by steps
    while len(encodedStr) > maxLen:
        message = message[:-steps - len(truncateIndicator)] + truncateIndicator
        encodedStr = zipEncodeStr(message, maxLen=-1)
    return encodedStr
def getSize(obj):
    """
    _getSize_

    Traverse an object graph and compute its total size in bytes.

    :param obj: a python object (types, modules and functions are rejected)
    :return: an integer representing the total size of the object

    Code extracted from Stack Overflow:
    https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
    """
    # Custom objects know their class; function objects know way too
    # much (including their module), so exclude those outright.
    excludedTypes = (type, ModuleType, FunctionType)
    if isinstance(obj, excludedTypes):
        raise TypeError('getSize() does not take argument of type: ' + str(type(obj)))
    visited = set()
    totalSize = 0
    pending = [obj]
    while pending:
        nextLevel = []
        for item in pending:
            # count each object once, skipping excluded types
            if isinstance(item, excludedTypes) or id(item) in visited:
                continue
            visited.add(id(item))
            totalSize += sys.getsizeof(item)
            nextLevel.append(item)
        pending = get_referents(*nextLevel)
    return totalSize
def decodeBytesToUnicode(value, errors="strict"):
    """
    Decode *value* into a sequence of unicode codepoints when it is a
    bytes object; any other type is returned unchanged.

    This supports the "unicode sandwich" approach: decode bytes to
    unicode as soon as possible when receiving input data, and convert
    unicode back to bytes as late as possible. Note that converting
    unicode back to bytes is NOT covered by this function, and is not
    always necessary — when in doubt, do not do it.

    Reference: https://nedbatchelder.com/text/unipain.html

    :param value: any object; only bytes instances are decoded
    :param errors: error handler passed to bytes.decode — one of
        "strict", "ignore", "replace", "backslashreplace"
    :return: str when the input was bytes, otherwise the input unchanged
    """
    if not isinstance(value, bytes):
        return value
    return value.decode("utf-8", errors)
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
    """
    Call decodeBytesToUnicode(value, errors) when *condition* is truthy,
    otherwise return *value* untouched. Useful for conditionally applying
    the decode (typically a PY2/PY3 switch) while keeping call sites
    short.

    :param value: passed to decodeBytesToUnicode
    :param errors: passed to decodeBytesToUnicode
    :param condition: boolean (or object with __bool__) deciding whether
        to decode
    """
    return decodeBytesToUnicode(value, errors) if condition else value
def encodeUnicodeToBytes(value, errors="strict"):
    """
    Encode *value* into a sequence of bytes when it is a unicode string;
    any other type is returned unchanged.

    This supports the "unicode sandwich" approach: convert unicode back
    to bytes as late as possible, e.g. right before handing a string to
    a third-party function that only accepts bytes as input (pycurl's
    curl.setopt is an example).

    :param value: any object; only str instances are encoded
    :param errors: error handler passed to str.encode — one of "strict",
        "ignore", "replace", "backslashreplace", "xmlcharrefreplace",
        "namereplace"
    :return: bytes when the input was str, otherwise the input unchanged
    """
    if not isinstance(value, str):
        return value
    return value.encode("utf-8", errors)
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
    """
    Call encodeUnicodeToBytes(value, errors) when *condition* is truthy,
    otherwise return *value* untouched. Useful for conditionally applying
    the encode (typically a PY2/PY3 switch) while keeping call sites
    short.

    :param value: passed to encodeUnicodeToBytes
    :param errors: passed to encodeUnicodeToBytes
    :param condition: boolean (or object with __bool__) deciding whether
        to encode
    """
    return encodeUnicodeToBytes(value, errors) if condition else value
import json
import urllib
from urllib.parse import urlparse, parse_qs, quote_plus
from collections import defaultdict
from Utils.CertTools import cert, ckey
from dbs.apis.dbsClient import aggFileLumis, aggFileParents
from WMCore.Services.pycurl_manager import getdata as multi_getdata
from Utils.PortForward import PortForward
def dbsListFileParents(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS listFileParents API.

    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict of aggregated file-parent results keyed by block name
    """
    blockUrls = ['%s/fileparents?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(blockUrls, aggFileParents, 'block_name')
def dbsListFileLumis(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS listFileLumis API.

    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict of aggregated file-lumi results keyed by block name
    """
    blockUrls = ['%s/filelumis?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(blockUrls, aggFileLumis, 'block_name')
def dbsBlockOrigin(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS blockorigin API.

    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict of raw block-origin results keyed by block name
    """
    blockUrls = ['%s/blockorigin?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    # no aggregation function: the decoded payload is returned as-is
    return getUrls(blockUrls, None, 'block_name')
def dbsParentFilesGivenParentDataset(dbsUrl, parentDataset, fInfo):
    """
    Obtain parent files for the given file info objects.

    :param dbsUrl: DBS URL
    :param parentDataset: parent dataset name
    :param fInfo: list of file info dicts carrying at least 'run_num',
        'lumi_section_num' and 'logical_file_name'
    :return: defaultdict mapping each child LFN to a set of parent LFNs
    """
    portForwarder = PortForward(8443)

    def queryUrl(fileInfo):
        # one DBS "files" query per (run, lumi list) of the child file
        run = fileInfo['run_num']
        lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
        return portForwarder(f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}')

    # Build the url for each file only once; the previous implementation
    # recomputed the very same urls in a second identical loop when
    # matching results back to the input files.
    urlByFile = [(fileInfo, queryUrl(fileInfo)) for fileInfo in fInfo]
    rdict = getUrls([url for _, url in urlByFile], None, None)
    parentFiles = defaultdict(set)
    for fileInfo, url in urlByFile:
        if url in rdict:
            pFiles = {entry['logical_file_name'] for entry in rdict[url]}
            lfn = fileInfo['logical_file_name']
            parentFiles[lfn] = parentFiles[lfn].union(pFiles)
    return parentFiles
def getUrls(urls, aggFunc, uKey=None):
    """
    Perform parallel DBS calls for the given set of urls and apply the
    given aggregation function to each result.

    :param urls: list of DBS urls to call
    :param aggFunc: aggregation function applied to each decoded result
        (pass None to keep the raw decoded payload)
    :param uKey: url query parameter whose value keys the final dict
        (the full url is used as key when None)
    :return: dictionary of results, keyed by url or by the uKey value
    """
    responses = multi_getdata(urls, ckey(), cert())
    rdict = {}
    for row in responses:
        url = row['url']
        code = int(row.get('code', 200))
        if code != 200:
            error = row.get('error')
            msg = f"Fail to query {url}. Error: {code} {error}"
            raise RuntimeError(msg)
        key = urlParams(url).get(uKey) if uKey else url
        payload = json.loads(row.get('data', []))
        rdict[key] = aggFunc(payload) if aggFunc else payload
    return rdict
def urlParams(url):
    """
    Extract the query parameters of a URL into a dictionary.
    Single-valued parameters are flattened to their value, while
    repeated parameters keep the full list of values.

    :param url: URL link
    :return: dictionary of URL parameters
    """
    queryDict = parse_qs(urlparse(url).query)
    for name, values in queryDict.items():
        if len(values) == 1:
            queryDict[name] = values[0]
    return queryDict
from __future__ import (division, print_function)
from builtins import str, bytes
from Utils.Utilities import encodeUnicodeToBytes
from io import BytesIO
import re
import xml.etree.cElementTree as ET
# matches optionally signed integer strings, e.g. "42" or "-7"
# NOTE(review): the first alternation also matches a lone "-", which
# int() would reject — confirm whether such input can occur
int_number_pattern = re.compile(r'(^[0-9-]$|^[0-9-][0-9]*$)')
# matches decimal strings containing a dot, e.g. "3.14", "-2." or ".5"
float_number_pattern = re.compile(r'(^[-]?\d+\.\d*$|^\d*\.{1,1}\d+$)')
def adjust_value(value):
    """
    Normalize a raw XML string value: 'null'/'(null)' strings become
    None, and strings that look like numbers are converted to float or
    int. Non-string input is returned untouched.

    :param value: value extracted from an XML document
    :return: None, float, int or the original value
    """
    if not isinstance(value, str):
        return value
    if value in ('null', '(null)'):
        return None
    # decimal numbers such as "3.14", "-2." or ".5"
    if re.match(r'^-?\d+\.\d*$|^\d*\.\d+$', value):
        return float(value)
    # optionally signed integers; unlike the previous pattern this no
    # longer matches a lone "-", which used to crash int()
    if re.match(r'^-?\d+$', value):
        return int(value)
    return value
def xml_parser(data, prim_key):
    """
    Generic XML parser: yields one dict per element whose tag matches
    *prim_key*, mapping the tag to its attributes plus the recursively
    collected information of its children.

    :param data: can be of type "file object", unicode string or bytes string
    :param prim_key: tag name of the elements to extract
    """
    if isinstance(data, (str, bytes)):
        # wrap in-memory input into a byte stream for iterparse
        stream = BytesIO(encodeUnicodeToBytes(data, "ignore"))
    else:
        stream = data
    for event, elem in ET.iterparse(stream):
        if elem.tag != prim_key:
            continue
        row = {elem.tag: elem.attrib}
        get_children(elem, event, row, elem.tag)
        elem.clear()
        yield row
def get_children(elem, event, row, key):
    """
    xml_parser helper function. It gets recursively information about
    children for given element tag and stores it into row[key]:
    children with attributes contribute their attribute dict, children
    without attributes contribute their (normalized) text, and leaf
    children are accumulated into lists keyed by their tag.

    :param elem: current ElementTree element
    :param event: iterparse event (kept for interface compatibility, unused)
    :param row: dict being filled
    :param key: key of row under which children information is stored
    """
    # Element.getchildren() was removed in Python 3.9; iterating the
    # element (list(elem) / len(elem)) is the supported equivalent.
    for child in list(elem):
        child_key = child.tag
        child_data = child.attrib
        if not child_data:
            child_dict = adjust_value(child.text)
        else:
            child_dict = child_data
        if len(child):  # we got grand-children
            if child_dict:
                row[key][child_key] = child_dict
            else:
                row[key][child_key] = {}
            if isinstance(child_dict, dict):
                newdict = {child_key: child_dict}
            else:
                newdict = {child_key: {}}
            get_children(child, event, newdict, child_key)
            row[key][child_key] = newdict[child_key]
        else:
            if not isinstance(row[key], dict):
                row[key] = {}
            row[key].setdefault(child_key, [])
            row[key][child_key].append(child_dict)
        child.clear()
from __future__ import division
from builtins import object
from datetime import timedelta, datetime
import socket
import json
import logging
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Timers import LocalTimezone
class AlertManagerAPI(object):
    """
    A class used to send alerts via the MONIT AlertManager API
    """

    def __init__(self, alertManagerUrl, logger=None):
        """
        :param alertManagerUrl: base url of the AlertManager endpoint
        :param logger: optional logger; defaults to the root logger
        """
        self.alertManagerUrl = alertManagerUrl
        # sender's hostname is added as an annotation
        self.hostname = socket.gethostname()
        self.mgr = RequestHandler()
        self.ltz = LocalTimezone()
        self.headers = {"Content-Type": "application/json"}
        self.validSeverity = ["high", "medium", "low"]
        self.logger = logger if logger else logging.getLogger()

    def sendAlert(self, alertName, severity, summary, description, service, tag="wmcore", endSecs=600, generatorURL=""):
        """
        :param alertName: a unique name for the alert
        :param severity: low, medium, high
        :param summary: a short description of the alert
        :param description: a longer informational message with details about the alert
        :param service: the name of the service firing an alert
        :param tag: a unique tag used to help route the alert
        :param endSecs: how many seconds until the alarm is silenced
        :param generatorURL: this URL will be sent to AlertManager and configured as a clickable "Source" link in the web interface
        :return: False when the severity is invalid, otherwise the
                 AlertManager response

        AlertManager JSON format reference: https://www.prometheus.io/docs/alerting/latest/clients/
        [
          {
            "labels": {
               "alertname": "<requiredAlertName>",
               "<labelname>": "<labelvalue>",
               ...
             },
             "annotations": {
                "<labelname>": "<labelvalue>",
                ...
             },
             "startsAt": "<rfc3339>", # optional, will be current time if not present
             "endsAt": "<rfc3339>",
             "generatorURL": "<generator_url>" # optional
          },
        ]
        """
        if not self._isValidSeverity(severity):
            return False
        request = []
        alert = {}
        labels = {}
        annotations = {}
        # add labels
        labels["alertname"] = alertName
        labels["severity"] = severity
        labels["tag"] = tag
        labels["service"] = service
        alert["labels"] = labels
        # add annotations
        annotations["hostname"] = self.hostname
        annotations["summary"] = summary
        annotations["description"] = description
        alert["annotations"] = annotations
        # In python3 we won't need the LocalTimezone class
        # Will change to d = datetime.now().astimezone() + timedelta(seconds=endSecs)
        d = datetime.now(self.ltz) + timedelta(seconds=endSecs)
        alert["endsAt"] = d.isoformat("T")
        alert["generatorURL"] = generatorURL
        request.append(alert)
        # need to do this because pycurl_manager only accepts dict and encoded strings type
        params = json.dumps(request)
        res = self.mgr.getdata(self.alertManagerUrl, params=params, headers=self.headers, verb='POST')
        return res

    def _isValidSeverity(self, severity):
        """
        Used to check if the severity of the alert matches the valid levels: low, medium, high
        :param severity: severity of the alert
        :return: True or False
        """
        if severity not in self.validSeverity:
            # use the instance logger (previously the module-level logging
            # root logger was used, silently ignoring a custom logger
            # passed to the constructor)
            self.logger.critical("Alert submitted to AlertManagerAPI with invalid severity: %s", severity)
            return False
        return True
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class DBCreator(DBFormatter):
    """
    _DBCreator_

    Generic class for creating database tables.
    """

    def __init__(self, logger, dbinterface):
        """
        _init_

        Call the constructor of the parent class and create empty dictionaries
        to hold table create statements, constraint statements, index
        statements and insert statements.
        """
        DBFormatter.__init__(self, logger, dbinterface)
        self.create = {}
        self.constraints = {}
        self.inserts = {}
        self.indexes = {}

    def _executeStatements(self, statements, keys, conn, transaction):
        """
        Execute the sql statements stored under *keys* in the *statements*
        dictionary, wrapping any failure into a WMException.

        :param statements: dict of sql statements
        :param keys: iterable of keys selecting (and ordering) the statements
        :param conn: database connection to use, or None
        :param transaction: whether to run within a transaction
        """
        for key in keys:
            try:
                self.dbi.processData(statements[key],
                                     conn=conn,
                                     transaction=transaction)
            except Exception as e:
                msg = WMEXCEPTION['WMCORE-2'] + '\n\n' + \
                      str(statements[key]) + '\n\n' + str(e)
                self.logger.debug(msg)
                raise WMException(msg, 'WMCORE-2')

    def execute(self, conn=None, transaction=False):
        """
        _execute_

        Generic method to create tables, indexes and constraints and to
        insert the permanent data, by executing the sql statements held
        in the corresponding dictionaries. The keys of the self.create
        dictionary are sorted before execution, to offer the possibility
        of executing table creation in a certain order.
        """
        # tables first (in sorted key order), then indexes, constraints
        # and finally the permanent data inserts
        self._executeStatements(self.create, sorted(self.create.keys()), conn, transaction)
        self._executeStatements(self.indexes, self.indexes, conn, transaction)
        self._executeStatements(self.constraints, self.constraints, conn, transaction)
        self._executeStatements(self.inserts, self.inserts, conn, transaction)
        return True

    def __str__(self):
        """
        _str_

        Return a well formatted text representation of the schema held in the
        self.create, self.constraints, self.inserts, self.indexes dictionaries.
        """
        text = ''
        for statementDict in (self.create, self.constraints, self.inserts, self.indexes):
            for key in statementDict:
                text = text + statementDict[key].lstrip() + '\n'
        return text
from __future__ import division, print_function
from builtins import str, object
try:
import mongomock
except ImportError:
# this library should only be required by unit tests
mongomock = None
from pymongo import MongoClient, errors, IndexModel
from pymongo.errors import ConnectionFailure
class MongoDB(object):
    """
    A simple wrapper class for creating a connection to a MongoDB instance
    """
    def __init__(self, database=None, server=None,
                 create=False, collections=None, testIndexes=False,
                 logger=None, mockMongoDB=False, **kwargs):
        """
        :database: A database Name to connect to
        :server: The server url or a list of (server:port) pairs (see https://docs.mongodb.com/manual/reference/connection-string/)
        :create: A flag to trigger a database creation (if missing) during
                 object construction, together with collections if present.
        :collections: A list of tuples describing collections with indexes -
                      the first element is considered the collection name, all
                      the rest elements are considered as indexes
        :testIndexes: A flag to trigger index test and eventually to create them
                      if missing (TODO)
        :mockMongoDB: A flag to trigger a database simulation instead of trying
                      to connect to a real database server.
        :logger: Logger

        Useful optional parameters accepted by the MongoClient and forwarded
        through keyword arguments: replicaSet, port, connect,
        directConnection, username, password (credentials must NOT be
        percent-escaped when passed as parameters).
        """
        self.server = server
        self.logger = logger
        self.mockMongoDB = mockMongoDB
        if mockMongoDB and mongomock is None:
            msg = "You are trying to mock MongoDB, but you do not have mongomock in the python path."
            self.logger.critical(msg)
            raise ImportError(msg)
        # NOTE: We need to explicitly check for server availability, because
        # since pymongo 3.0 the MongoClient constructor no longer blocks
        # while connecting and no longer raises ConnectionFailure when the
        # server is unavailable; it returns immediately and connects on
        # background threads. See:
        # https://pymongo.readthedocs.io/en/stable/api/pymongo/mongo_client.html
        try:
            if mockMongoDB:
                self.client = mongomock.MongoClient()
                self.logger.info("NOTICE: MongoDB is set to use mongomock, instead of real database.")
            else:
                self.client = MongoClient(host=self.server, **kwargs)
                # both calls below force a round trip, surfacing
                # connectivity problems right here in the constructor
                self.client.server_info()
                self.client.admin.command('ping')
        except ConnectionFailure as ex:
            msg = "Could not connect to MongoDB server: %s. Server not available. \n"
            msg += "Giving up Now."
            self.logger.error(msg, self.server)
            raise ex from None
        except Exception as ex:
            msg = "Could not connect to MongoDB server: %s. Due to unknown reason: %s\n"
            msg += "Giving up Now."
            self.logger.error(msg, self.server, str(ex))
            raise ex from None
        self.create = create
        self.testIndexes = testIndexes
        self.dbName = database
        self.collections = collections or []
        self._dbConnect(database)
        if self.create and self.collections:
            for collection in self.collections:
                self._collCreate(collection, database)
        if self.testIndexes and self.collections:
            for collection in self.collections:
                self._indexTest(collection[0], collection[1])

    def _indexTest(self, collection, index):
        # TODO: not implemented yet
        pass

    def _collTest(self, coll, db):
        # TODO: not implemented yet
        # self[db].list_collection_names()
        pass

    def collCreate(self, coll):
        """
        A public method for _collCreate
        """
        # NOTE: this used to reference the non-existent attribute
        # self.database, raising AttributeError; the database name is
        # stored in self.dbName
        self._collCreate(coll, self.dbName)

    def _collCreate(self, coll, db):
        """
        A function used to explicitly create a collection with the relevant
        indexes - used to avoid the Lazy Creating from MongoDB and eventual issues
        in case we end up with no indexed collection, especially ones missing
        the (`unique` index parameter)
        :coll: A tuple describing one collection with indexes -
               The first element is considered to be the collection name, and all
               the rest of the elements are considered to be indexes.
               The indexes must be of type IndexModel. See pymongo documentation:
               https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.create_index
        :db: The database name for the collection
        """
        collName = coll[0]
        collIndexes = list(coll[1:])
        try:
            self.client[db].create_collection(collName)
        except errors.CollectionInvalid:
            # this error is thrown in case of an already existing collection
            msg = "Collection '{}' Already exists in database '{}'".format(coll, db)
            self.logger.warning(msg)
        if collIndexes:
            for index in collIndexes:
                if not isinstance(index, IndexModel):
                    msg = "ERR: Bad Index type for collection %s" % collName
                    # raise with the prepared message (it used to be
                    # silently discarded)
                    raise errors.InvalidName(msg)
            try:
                self.client[db][collName].create_indexes(collIndexes)
            except Exception as ex:
                msg = "Failed to create indexes on collection: %s\n%s" % (collName, str(ex))
                self.logger.error(msg)
                raise ex

    def _dbTest(self, db):
        """
        Tests server connectivity and the existence of the given database.
        """
        # Test connection (from mongoDB documentation):
        # https://api.mongodb.com/python/3.4.0/api/pymongo/mongo_client.html
        try:
            # The 'ismaster' command is cheap and does not require auth.
            self.client.admin.command('ismaster')
        except errors.ConnectionFailure as ex:
            msg = "Server not available: %s" % str(ex)
            self.logger.error(msg)
            raise ex
        # Test for database existence
        if db not in self.client.list_database_names():
            msg = "Missing MongoDB databases: %s" % db
            self.logger.error(msg)
            # raise with the prepared message (it used to be discarded)
            raise errors.InvalidName(msg)

    def _dbCreate(self, db):
        """
        Force-create the database by inserting a document: MongoDB only
        materializes a database once it contains data.
        """
        # creating an empty collection in order to create the database
        _initColl = self.client[db].create_collection('_initCollection')
        _initColl.insert_one({})
        # NOTE: never delete the _initCollection if you want the database to persist
        # self.client[db].drop_collection('_initCollection')

    def dbConnect(self):
        """
        A public method for _dbConnect
        """
        # same self.database -> self.dbName fix as in collCreate
        self._dbConnect(self.dbName)

    def _dbConnect(self, db):
        """
        The function to be used for the initial database connection creation
        and testing; creates the database on demand when self.create is set.
        """
        try:
            setattr(self, db, self.client[db])
            if not self.mockMongoDB:
                self._dbTest(db)
        except errors.ConnectionFailure as ex:
            msg = "Could not connect to MongoDB server for database: %s\n%s\n" % (db, str(ex))
            msg += "Giving up Now."
            self.logger.error(msg)
            raise ex
        except errors.InvalidName as ex:
            msg = "Could not connect to a missing MongoDB databases: %s\n%s" % (db, str(ex))
            self.logger.error(msg)
            if self.create:
                msg = "Trying to create: %s" % db
                self.logger.error(msg)
                try:
                    self._dbCreate(db)
                except Exception as exc:
                    msg = "Could not create MongoDB databases: %s\n%s\n" % (db, str(exc))
                    msg += "Giving up Now."
                    self.logger.error(msg)
                    raise exc
                try:
                    self._dbTest(db)
                except Exception as exc:
                    msg = "Second failure while testing %s\n%s\n" % (db, str(exc))
                    msg += "Giving up Now."
                    self.logger.error(msg)
                    raise exc
                msg = "Database %s successfully created" % db
                self.logger.error(msg)
        except Exception as ex:
            msg = "General Exception while trying to connect to : %s\n%s" % (db, str(ex))
            self.logger.error(msg)
            raise ex
import logging
import time
from WMCore.DataStructs.WMObject import WMObject
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class Transaction(WMObject):
    """
    Wrapper around a DBInterface connection that manages an explicit
    database transaction (begin/commit/rollback) on a pooled connection.
    """
    # Class-level default; instances overwrite this in __init__.
    dbi = None

    def __init__(self, dbinterface=None):
        """
        Get the connection from the DBInterface and open a new transaction on it

        :param dbinterface: a WMCore.Database.DBCore.DBInterface instance
        """
        self.dbi = dbinterface
        self.conn = None
        self.transaction = None

    def begin(self):
        """
        Acquire a connection from the pool (if there is none yet, or the
        current one is closed) and begin a transaction on it.
        Safe to call repeatedly.
        """
        if self.conn is None or self.conn.closed:
            self.conn = self.dbi.connection()
        if self.transaction is None:
            self.transaction = self.conn.begin()
        return

    def processData(self, sql, binds=None):
        """
        Propagates the request to the proper dbcore backend,
        and performs checks for lost (or closed) connection.

        :param sql: SQL statement (or list of statements)
        :param binds: bind dictionary / list of bind dictionaries
        """
        # avoid a shared mutable default argument
        if binds is None:
            binds = {}
        result = self.dbi.processData(sql, binds, conn=self.conn,
                                      transaction=True)
        return result

    def commit(self):
        """
        Commit the transaction and return the connection to the pool
        """
        if self.transaction is not None:
            self.transaction.commit()
        if self.conn is not None:
            self.conn.close()
        self.conn = None
        self.transaction = None

    def rollback(self):
        """
        To be called if there is an exception and you want to roll back the
        transaction and return the connection to the pool
        """
        if self.transaction:
            self.transaction.rollback()
        if self.conn:
            self.conn.close()
        self.conn = None
        self.transaction = None
        return

    def rollbackForError(self):
        """
        This is called when handling a major exception. This is because sometimes
        you can end up in a situation where the transaction appears open, but is not.
        In this case, calling a rollback on the transaction will cause an exception,
        which then destroys all logging and shutdown of the actual code.

        Use only in components.
        """
        try:
            self.rollback()
        except Exception:
            # best effort: never let rollback failures mask the original error
            # (narrowed from a bare except so SystemExit/KeyboardInterrupt pass)
            pass
        return
from copy import copy
from Utils.IteratorTools import grouper
import WMCore.WMLogging
from WMCore.DataStructs.WMObject import WMObject
from WMCore.Database.ResultSet import ResultSet
class DBInterface(WMObject):
    """
    Base class for doing SQL operations using a SQLAlchemy engine, or
    pre-existing connection.

    processData will take a (list of) sql statements and a (list of)
    bind variable dictionaries and run the statements on the DB. If
    necessary it will substitute binds into the sql (MySQL).

    TODO:
        Add in some suitable exceptions in one or two places
        Test the hell out of it
        Support executemany()
    """
    logger = None
    engine = None

    def __init__(self, logger, engine):
        self.logger = logger
        self.logger.info("Instantiating base WM DBInterface")
        self.engine = engine
        # cap on how many bind sets are sent to the DB in a single query
        self.maxBindsPerQuery = 500

    def buildbinds(self, sequence, thename, therest=None):
        """
        Build a list of binds. Can be used recursively, e.g.:
        buildbinds(file, 'file', buildbinds(pnn, 'location'), {'lumi':123})

        :param sequence: iterable of values to bind under `thename`
        :param thename: bind variable name
        :param therest: list of base bind dictionaries to extend (default [{}])

        TODO: replace with an appropriate map function
        """
        # avoid a shared mutable default argument
        if therest is None:
            therest = [{}]
        binds = []
        for r in sequence:
            for i in self.makelist(therest):
                thebind = copy(i)
                thebind[thename] = r
                binds.append(thebind)
        return binds

    def executebinds(self, s=None, b=None, connection=None,
                     returnCursor=False):
        """
        _executebinds_

        Execute one SQL statement with a single set of binds.

        returns a list of sqlalchemy.engine.base.ResultProxy objects
        """
        if b is None:
            resultProxy = connection.execute(s)
        else:
            resultProxy = connection.execute(s, b)
        if returnCursor:
            return resultProxy
        result = ResultSet()
        result.add(resultProxy)
        resultProxy.close()
        return result

    def executemanybinds(self, s=None, b=None, connection=None,
                         returnCursor=False):
        """
        _executemanybinds_

        b is a list of dictionaries for the binds, e.g.:
        b = [ {'bind1':'value1a', 'bind2': 'value2a'},
        {'bind1':'value1b', 'bind2': 'value2b'} ]

        see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/

        Can't executemany() selects - so do each combination of binds here instead.
        This will return a list of sqlalchemy.engine.base.ResultProxy object's
        one for each set of binds.

        returns a list of sqlalchemy.engine.base.ResultProxy objects
        """
        s = s.strip()
        if s.lower().startswith('select'):
            # Selects cannot be executemany'd: run each set of binds separately.
            if returnCursor:
                result = []
                for bind in b:
                    result.append(connection.execute(s, bind))
            else:
                result = ResultSet()
                for bind in b:
                    resultproxy = connection.execute(s, bind)
                    result.add(resultproxy)
                    resultproxy.close()
            return self.makelist(result)
        # Inserting or updating many: the DBAPI handles the bind list natively.
        result = connection.execute(s, b)
        return self.makelist(result)

    def connection(self):
        """
        Return a connection to the engine (from the connection pool)
        """
        return self.engine.connect()

    def processData(self, sqlstmt, binds=None, conn=None,
                    transaction=False, returnCursor=False):
        """
        set conn if you already have an active connection to reuse
        set transaction = True if you already have an active transaction

        Dispatches depending on the shape of the sql/binds arguments:
          - N statements, no binds: run each statement (DDL/create style)
          - 1 statement, M binds:   executemany in chunks of maxBindsPerQuery
          - N statements, N binds:  run each statement with its own binds
        """
        # avoid a shared mutable default argument
        if binds is None:
            binds = {}
        connection = None
        try:
            if not conn:
                connection = self.connection()
            else:
                connection = conn
            result = []
            # Can take either a single statement or a list of statements and binds
            sqlstmt = self.makelist(sqlstmt)
            binds = self.makelist(binds)
            if len(sqlstmt) > 0 and (len(binds) == 0 or (binds[0] == {} or binds[0] is None)):
                # Should only be run by create statements
                if not transaction:
                    trans = connection.begin()
                for i in sqlstmt:
                    r = self.executebinds(i, connection=connection,
                                          returnCursor=returnCursor)
                    result.append(r)
                if not transaction:
                    trans.commit()
            elif len(binds) > len(sqlstmt) and len(sqlstmt) == 1:
                # Run single SQL statement for a list of binds - use execute_many()
                if not transaction:
                    trans = connection.begin()
                for subBinds in grouper(binds, self.maxBindsPerQuery):
                    result.extend(self.executemanybinds(sqlstmt[0], subBinds,
                                                        connection=connection, returnCursor=returnCursor))
                if not transaction:
                    trans.commit()
            elif len(binds) == len(sqlstmt):
                # Run a list of SQL for a list of binds
                if not transaction:
                    trans = connection.begin()
                for i, s in enumerate(sqlstmt):
                    b = binds[i]
                    r = self.executebinds(s, b, connection=connection,
                                          returnCursor=returnCursor)
                    result.append(r)
                if not transaction:
                    trans.commit()
            else:
                # mismatched sql/binds shapes: dump diagnostics, then raise
                self.logger.exception(
                    "DBInterface.processData Nothing executed, problem with your arguments")
                self.logger.exception(
                    "DBInterface.processData SQL = %s" % sqlstmt)
                WMCore.WMLogging.sqldebug('DBInterface.processData sql is %s items long' % len(sqlstmt))
                WMCore.WMLogging.sqldebug('DBInterface.processData binds are %s items long' % len(binds))
                assert_value = False
                if len(binds) == len(sqlstmt):
                    assert_value = True
                WMCore.WMLogging.sqldebug('DBInterface.processData are binds and sql same length? : %s' % (assert_value))
                WMCore.WMLogging.sqldebug('sql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
                                          (sqlstmt, binds, connection, transaction))
                WMCore.WMLogging.sqldebug('type check:\nsql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
                                          (type(sqlstmt), type(binds), type(connection), type(transaction)))
                raise Exception("""DBInterface.processData Nothing executed, problem with your arguments
            Probably mismatched sizes for sql (%i) and binds (%i)""" % (len(sqlstmt), len(binds)))
        finally:
            if not conn and connection is not None:
                connection.close()  # Return connection to the pool
        return result
import copy
from WMCore.Database.DBCore import DBInterface
from WMCore.Database.ResultSet import ResultSet
def bindVarCompare(a):
    """
    _bindVarCompare_

    Sort key for bind-variable tuples: the first element is the variable
    name, the second its position in the query. Ordering is by position.
    """
    position = a[1]
    return position
def stringLengthCompare(a):
    """
    _stringLengthCompare_

    Sort key returning the length of `a`, used to sort strings by length.
    To go from longest to shortest, reverse the sort.
    """
    length = len(a)
    return length
class MySQLInterface(DBInterface):
    """
    MySQL flavour of DBInterface: rewrites ':name' style bind variables
    into the positional '%s' placeholders that the MySQL DBAPI expects.
    """

    def substitute(self, origSQL, origBindsList):
        """
        _substitute_

        Transform a set of bind variables from a list of dictionaries to a list
        of tuples:
          b = [ {'bind1':'value1a', 'bind2': 'value2a'},
                {'bind1':'value1b', 'bind2': 'value2b'} ]
        Will be transformed into:
          b = [ ('value1a', 'value2a'), ('value1b', 'value2b')]

        Don't need to substitute in the binds as executemany does that
        internally.  But the sql will also need to be reformatted, such that
        :bind_name becomes %s.

        See: http://www.devshed.com/c/a/Python/MySQL-Connectivity-With-Python/5/

        :returns: tuple (updatedSQL, list-of-bind-tuples); binds are None
            when origBindsList is None
        """
        if origBindsList is None:
            return origSQL, None
        origBindsList = self.makelist(origBindsList)
        origBind = origBindsList[0]
        bindVarPositionList = []
        updatedSQL = copy.copy(origSQL)
        # We process bind variables from longest to shortest to avoid a shorter
        # bind variable matching a longer one. For example if we have two bind
        # variables: RELEASE_VERSION and RELEASE_VERSION_ID the former will
        # match against the latter, causing problems. We'll sort the variable
        # names by length to guard against this.
        bindVarNames = list(origBind)
        bindVarNames.sort(key=stringLengthCompare, reverse=True)
        bindPositions = {}
        for bindName in bindVarNames:
            # First pass: record every position of this bind in the ORIGINAL
            # SQL (positions there are stable because it is never modified).
            searchPosition = 0
            while True:
                bindPosition = origSQL.lower().find(":%s" % bindName.lower(),
                                                    searchPosition)
                if bindPosition == -1:
                    break
                if bindPosition not in bindPositions:
                    bindPositions[bindPosition] = 0
                    bindVarPositionList.append((bindName, bindPosition))
                searchPosition = bindPosition + 1
            # Second pass: replace each ':name' token with '%s' in the copy.
            searchPosition = 0
            while True:
                bindPosition = updatedSQL.lower().find(":%s" % bindName.lower(),
                                                       searchPosition)
                if bindPosition == -1:
                    break
                left = updatedSQL[0:bindPosition]
                right = updatedSQL[bindPosition + len(bindName) + 1:]
                # the token just found is consumed by the replacement, so the
                # next find() from the same position makes progress
                updatedSQL = left + "%s" + right
        bindVarPositionList.sort(key=bindVarCompare)
        mySQLBindVarsList = []
        for origBind in origBindsList:
            mySQLBindVars = []
            for bindVarPosition in bindVarPositionList:
                mySQLBindVars.append(origBind[bindVarPosition[0]])
            mySQLBindVarsList.append(tuple(mySQLBindVars))
        return (updatedSQL, mySQLBindVarsList)

    def executebinds(self, s=None, b=None, connection=None,
                     returnCursor=False):
        """
        _executebinds_

        Execute a SQL statement that has a single set of bind variables.
        Transform the bind variables into the format that MySQL expects.
        """
        s, b = self.substitute(s, b)
        return DBInterface.executebinds(self, s, b, connection, returnCursor)

    def executemanybinds(self, s=None, b=None, connection=None,
                         returnCursor=False):
        """
        _executemanybinds_

        Execute a SQL statement that has multiple sets of bind variables.
        Transform the bind variables into the format that MySQL expects.
        """
        newsql, binds = self.substitute(s, b)
        return DBInterface.executemanybinds(self, newsql, binds, connection,
                                            returnCursor)
from __future__ import print_function
from builtins import str, bytes, int
from future.utils import viewitems
from Utils.PythonVersion import PY2
import sys
import types
class _EmptyClass(object):
    """
    Bare placeholder used by the unthunker: an instance is created and its
    __class__ is then reassigned to the target type, bypassing __init__.
    """
    pass
class JSONThunker(object):
    """
    _JSONThunker_

    Converts an arbitrary object to <-> from a jsonable object.

    Will, for the most part "do the right thing" about various instance objects
    by storing their class information along with their data in a dict. Handles
    a recursion limit to prevent infinite recursion.

    self.passThroughTypes - stores a list of types that should be passed
    through unchanged to the JSON parser

    self.blackListedModules - a list of modules that should not be stored in
    the JSON.
    """

    def __init__(self):
        self.passThroughTypes = (type(None),
                                 bool,
                                 int,
                                 float,
                                 complex,
                                 str,
                                 bytes,
                                 )
        # objects that inherit from dict should be treated as a dict
        # they don't store their data in __dict__. There was enough
        # of those classes that it warrented making a special case
        self.dictSortOfObjects = (('WMCore.Datastructs.Job', 'Job'),
                                  ('WMCore.WMBS.Job', 'Job'),
                                  ('WMCore.Database.CMSCouch', 'Document'))
        # ditto above, but for lists
        self.listSortOfObjects = (('WMCore.DataStructs.JobPackage', 'JobPackage'),
                                  ('WMCore.WMBS.JobPackage', 'JobPackage'),)
        self.foundIDs = {}
        # modules we don't want JSONed
        self.blackListedModules = ('sqlalchemy.engine.threadlocal',
                                   'WMCore.Database.DBCore',
                                   'logging',
                                   'WMCore.DAOFactory',
                                   'WMCore.WMFactory',
                                   'WMFactory',
                                   'WMCore.Configuration',
                                   'WMCore.Database.Transaction',
                                   'threading',
                                   'datetime')

    def checkRecursion(self, data):
        """
        handles checking for infinite recursion
        """
        if id(data) in self.foundIDs:
            if self.foundIDs[id(data)] > 5:
                self.unrecurse(data)
                return "**RECURSION**"
            else:
                self.foundIDs[id(data)] += 1
                return data
        else:
            self.foundIDs[id(data)] = 1
            return data

    def unrecurse(self, data):
        """
        backs off the recursion counter if we're returning from _thunk
        """
        try:
            self.foundIDs[id(data)] -= 1
        except:
            print("Could not find count for id %s of type %s data %s" % (id(data), type(data), data))
            raise

    def checkBlackListed(self, data):
        """
        checks to see if a given object is from a blacklisted module
        """
        try:
            # special case
            if data.__class__.__module__ == 'WMCore.Database.CMSCouch' and data.__class__.__name__ == 'Document':
                data.__class__ = type({})
                return data
            if data.__class__.__module__ in self.blackListedModules:
                return "Blacklisted JSON object: module %s, name %s, str() %s" % \
                       (data.__class__.__module__, data.__class__.__name__, str(data))
            else:
                return data
        except Exception:
            return data

    def thunk(self, toThunk):
        """
        Thunk - turns an arbitrary object into a JSONable object
        """
        self.foundIDs = {}
        data = self._thunk(toThunk)
        return data

    def unthunk(self, data):
        """
        unthunk - turns a previously 'thunked' object back into a python object
        """
        return self._unthunk(data)

    def handleSetThunk(self, toThunk):
        """Encode a set as {'type': 'set', 'set': [...]}."""
        toThunk = self.checkRecursion(toThunk)
        tempDict = {'thunker_encoded_json': True, 'type': 'set'}
        tempDict['set'] = self._thunk(list(toThunk))
        self.unrecurse(toThunk)
        return tempDict

    def handleListThunk(self, toThunk):
        """Thunk every element of a plain list, in place."""
        toThunk = self.checkRecursion(toThunk)
        for k, v in enumerate(toThunk):
            toThunk[k] = self._thunk(v)
        self.unrecurse(toThunk)
        return toThunk

    def handleDictThunk(self, toThunk):
        """Thunk a plain dict; int/float keys are tagged so they survive JSON."""
        toThunk = self.checkRecursion(toThunk)
        special = False
        tmpdict = {}
        for k, v in viewitems(toThunk):
            # FIX: the original compared type(k) to type(int) (i.e. to `type`
            # itself), so int/float keys were never tagged and silently became
            # strings after a JSON round trip. bool is excluded because it is
            # an int subclass and int('True') would fail on decode.
            if isinstance(k, int) and not isinstance(k, bool):
                special = True
                tmpdict['_i:%s' % k] = self._thunk(v)
            elif isinstance(k, float):
                special = True
                tmpdict['_f:%s' % k] = self._thunk(v)
            else:
                tmpdict[k] = self._thunk(v)
        if special:
            toThunk['thunker_encoded_json'] = self._thunk(True)
            toThunk['type'] = self._thunk('dict')
            toThunk['dict'] = tmpdict
        else:
            toThunk.update(tmpdict)
        self.unrecurse(toThunk)
        return toThunk

    def handleObjectThunk(self, toThunk):
        """Thunk an arbitrary instance object."""
        toThunk = self.checkRecursion(toThunk)
        toThunk = self.checkBlackListed(toThunk)
        if isinstance(toThunk, (str, bytes)):
            # things that got blacklisted
            return toThunk
        if hasattr(toThunk, '__to_json__'):
            # Use classes own json thunker
            toThunk2 = toThunk.__to_json__(self)
            self.unrecurse(toThunk)
            return toThunk2
        elif isinstance(toThunk, dict):
            toThunk2 = self.handleDictObjectThunk(toThunk)
            self.unrecurse(toThunk)
            return toThunk2
        elif isinstance(toThunk, list):
            # a mother thunking list
            toThunk2 = self.handleListObjectThunk(toThunk)
            self.unrecurse(toThunk)
            return toThunk2
        else:
            try:
                thunktype = '%s.%s' % (toThunk.__class__.__module__,
                                       toThunk.__class__.__name__)
                tempDict = {'thunker_encoded_json': True, 'type': thunktype}
                tempDict[thunktype] = self._thunk(toThunk.__dict__)
                self.unrecurse(toThunk)
                return tempDict
            except Exception as e:
                tempDict = {'json_thunk_exception_': "%s" % e}
                self.unrecurse(toThunk)
                return tempDict

    def handleDictObjectThunk(self, data):
        """Thunk a dict subclass: items under the type key, attrs at top level."""
        thunktype = '%s.%s' % (data.__class__.__module__,
                               data.__class__.__name__)
        tempDict = {'thunker_encoded_json': True,
                    'is_dict': True,
                    'type': thunktype,
                    thunktype: {}}
        for k, v in viewitems(data.__dict__):
            tempDict[k] = self._thunk(v)
        for k, v in viewitems(data):
            tempDict[thunktype][k] = self._thunk(v)
        return tempDict

    def handleDictObjectUnThunk(self, value, data):
        """Inverse of handleDictObjectThunk."""
        data.pop('thunker_encoded_json', False)
        data.pop('is_dict', False)
        thunktype = data.pop('type', False)
        for k, v in viewitems(data):
            if k == thunktype:
                for k2, v2 in viewitems(data[thunktype]):
                    value[k2] = self._unthunk(v2)
            else:
                value.__dict__[k] = self._unthunk(v)
        return value

    def handleListObjectThunk(self, data):
        """Thunk a list subclass: items under the type key, attrs at top level."""
        thunktype = '%s.%s' % (data.__class__.__module__,
                               data.__class__.__name__)
        tempDict = {'thunker_encoded_json': True,
                    'is_list': True,
                    'type': thunktype,
                    thunktype: []}
        for k, v in enumerate(data):
            # FIX: was tempDict['thunktype'] (the literal string), which
            # raised KeyError; the items belong under the type key itself.
            tempDict[thunktype].append(self._thunk(v))
        for k, v in viewitems(data.__dict__):
            tempDict[k] = self._thunk(v)
        return tempDict

    def handleListObjectUnThunk(self, value, data):
        """
        Inverse of handleListObjectThunk.

        FIX: the original iterated viewitems() over the stored *list* (which
        raises) and overwrote value.__dict__ wholesale; restore the payload
        and attributes symmetrically to handleListObjectThunk instead.
        NOTE(review): assumes `value` is a list-like instance — confirm with
        the classes in listSortOfObjects.
        """
        data.pop('thunker_encoded_json', False)
        data.pop('is_list', False)
        thunktype = data.pop('type')
        # restore the list payload itself
        for item in data[thunktype]:
            value.append(self._unthunk(item))
        # restore any instance attributes stored alongside the payload
        for k, v in viewitems(data):
            if k == thunktype:
                continue
            value.__dict__[k] = self._unthunk(v)
        return value

    def _thunk(self, toThunk):
        """
        helper function for thunk, does the actual work
        """
        if isinstance(toThunk, self.passThroughTypes):
            return toThunk
        elif type(toThunk) is list:
            return self.handleListThunk(toThunk)
        elif type(toThunk) is dict:
            return self.handleDictThunk(toThunk)
        elif type(toThunk) is set:
            return self.handleSetThunk(toThunk)
        elif type(toThunk) is types.FunctionType:
            self.unrecurse(toThunk)
            return "function reference"
        elif isinstance(toThunk, object):
            return self.handleObjectThunk(toThunk)
        else:
            self.unrecurse(toThunk)
            raise RuntimeError(type(toThunk))

    def _unthunk(self, jsondata):
        """
        _unthunk - does the actual work for unthunk
        """
        if PY2 and type(jsondata) is str:
            return jsondata.encode("utf-8")
        if type(jsondata) is dict:
            if 'thunker_encoded_json' in jsondata:
                # we've got a live one...
                if jsondata['type'] == 'set':
                    newSet = set()
                    for i in self._unthunk(jsondata['set']):
                        newSet.add(self._unthunk(i))
                    return newSet
                if jsondata['type'] == 'dict':
                    # We have a "special" dict with tagged int/float keys
                    data = {}
                    for k, v in viewitems(jsondata['dict']):
                        tmp = self._unthunk(v)
                        # FIX: lstrip('_i:') strips a *character set*, not a
                        # prefix; slice off the 3-character tag instead.
                        if k.startswith('_i:'):
                            data[int(k[3:])] = tmp
                        elif k.startswith('_f:'):
                            data[float(k[3:])] = tmp
                        else:
                            data[k] = tmp
                    return data
                else:
                    # spawn up an instance.. good luck
                    # here be monsters
                    # inspired from python's pickle code
                    ourClass = self.getThunkedClass(jsondata)
                    value = _EmptyClass()
                    if hasattr(ourClass, '__from_json__'):
                        # Use classes own json loader
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = ourClass.__from_json__(value, jsondata, self)
                    elif 'thunker_encoded_json' in jsondata and 'is_dict' in jsondata:
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = self.handleDictObjectUnThunk(value, jsondata)
                    elif 'thunker_encoded_json' in jsondata:
                        try:
                            value.__class__ = ourClass
                        except Exception:
                            value = ourClass()
                        value = self.handleListObjectUnThunk(value, jsondata)
                    else:
                        raise RuntimeError('Could not unthunk a class. Code to try was removed because it had errors.')
                    return value
            else:
                data = {}
                for k, v in viewitems(jsondata):
                    data[k] = self._unthunk(v)
                return data
        else:
            return jsondata

    @staticmethod
    def getThunkedClass(jsondata):
        """
        Work out the class from it's thunked json representation
        """
        module = jsondata['type'].rsplit('.', 1)[0]
        name = jsondata['type'].rsplit('.', 1)[1]
        # FIX: the original compared the string `name` to the JSONThunker
        # class object, so this guard could never fire.
        if (module == 'WMCore.Services.Requests') and (name == 'JSONThunker'):
            raise RuntimeError("Attempted to unthunk a JSONThunker..")
        __import__(module)
        mod = sys.modules[module]
        ourClass = getattr(mod, name)
        return ourClass
from builtins import next, str, object
from future.utils import viewitems
import xml.parsers.expat
class Node(object):
    """
    _Node_

    Minimal DOM-like container used while parsing the XML report file.
    Holds the tag name, stringified attributes, optional text content and
    a list of child nodes, without all the whitespace guff.
    """

    def __init__(self, name, attrs):
        self.name = str(name)
        self.attrs = {}
        self.text = None
        for key, value in attrs.items():
            self.attrs[str(key)] = str(value)
        self.children = []

    def __str__(self):
        lines = [" %s %s \"%s\"\n" % (self.name, self.attrs, self.text)]
        lines.extend(str(child) for child in self.children)
        return "".join(lines)
def coroutine(func):
    """
    _coroutine_

    Decorator used to prime coroutines: calling the wrapped function
    instantiates the generator and advances it to the first yield, so it
    is immediately ready to receive values via send().
    """
    def start(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen
    return start
def xmlFileToNode(reportFile):
    """
    _xmlFileToNode_

    Use expat and the build coroutine to parse the XML file and build
    a node structure.

    :param reportFile: path to the XML job report file
    :returns: the root Node ("JobReports") of the parsed tree
    """
    node = Node("JobReports", {})
    # FIX: the original opened the file without ever closing it (leak);
    # a context manager guarantees the handle is released.
    with open(reportFile, 'rb') as handle:
        expat_parse(handle, build(node))
    return node
def expat_parse(f, target):
    """
    _expat_parse_

    Drive an expat parser over the open (binary) file object `f`, feeding
    ('start', (name, attrs)), ('end', name) and ('text', data) events into
    the `target` coroutine via send().
    """
    parser = xml.parsers.expat.ParserCreate()
    # merge contiguous character data into single 'text' events
    parser.buffer_text = True

    def onStart(name, attrs):
        target.send(('start', (name, attrs)))

    def onEnd(name):
        target.send(('end', name))

    def onText(data):
        target.send(('text', data))

    parser.StartElementHandler = onStart
    parser.EndElementHandler = onEnd
    parser.CharacterDataHandler = onText
    parser.ParseFile(f)
@coroutine
def build(topNode):
    """
    _build_

    Coroutine that receives parser events from expat_parse and assembles
    the Node tree rooted at `topNode`.
    """
    stack = [topNode]
    textParts = []
    while True:
        event, value = (yield)
        if event == "start":
            textParts = []
            child = Node(value[0], value[1])
            stack[-1].children.append(child)
            stack.append(child)
        elif event == "text":
            textParts.append(value)
        else:
            # "end": flush accumulated character data into the closing node
            stack[-1].text = str(''.join(textParts)).strip()
            stack.pop()
            textParts = []
from __future__ import print_function, division
from builtins import str, range
import math
import decimal
import logging
from WMCore.WMException import WMException
class MathAlgoException(WMException):
    """
    Some simple math algo exceptions

    Generic exception raised by the helpers in this module when they are
    given non-numeric or otherwise invalid input.
    """
    pass
def getAverageStdDev(numList):
    """
    _getAverageStdDev_

    Given a list, calculate both the average and the standard deviation,
    skipping NaN/inf entries.

    :param numList: list of ints/floats
    :returns: tuple (average, stdDev); (0.0, 0.0) when there is no usable data
    :raises MathAlgoException: when a non-numeric value is encountered
    """
    # FIX: the original tested `len(numList) < 0`, which can never be true;
    # the intended guard is an empty input list.
    if len(numList) < 1:
        return 0.0, 0.0
    total = 0.0
    stdBase = 0.0
    skipped = 0
    # Assemble the average, ignoring non-finite entries
    for value in numList:
        try:
            if math.isnan(value) or math.isinf(value):
                skipped += 1
            else:
                total += value
        except TypeError:
            msg = "Attempted to take average of non-numerical values.\n"
            msg += "Expected int or float, got %s: %s" % (value.__class__, value)
            logging.error(msg)
            logging.debug("FullList: %s", numList)
            raise MathAlgoException(msg)
    length = len(numList) - skipped
    if length < 1:
        # every entry was NaN/inf: nothing to average
        return 0.0, 0.0
    average = total / length
    for value in numList:
        # FIX: non-finite entries were excluded from the average but NOT from
        # the deviation sum, which turned stdBase into NaN and silently
        # collapsed the standard deviation to 0.0; skip them here as well.
        if math.isnan(value) or math.isinf(value):
            continue
        tmpValue = value - average
        stdBase += (tmpValue * tmpValue)
    stdDev = math.sqrt(stdBase / length)
    # defensive sanity checks mirroring the original code
    if math.isnan(average) or math.isinf(average):
        average = 0.0
    # FIX: the original re-tested math.isinf(average) here where stdDev was meant
    if math.isnan(stdDev) or math.isinf(stdDev) or not decimal.Decimal(str(stdDev)).is_finite():
        stdDev = 0.0
    if not isinstance(stdDev, (int, float)):
        stdDev = 0.0
    return average, stdDev
def createHistogram(numList, nBins, limit):
    """
    _createHistogram_

    Create a histogram proxy (a list of bins) for a
    given list of numbers

    :param numList: list of numbers to histogram
    :param nBins: number of regular bins between the computed bounds
    :param limit: width (in standard deviations around the mean) inside
        which values are binned; values outside go to underflow/overflow
    :returns: list of bin dicts with keys 'type', 'average', 'stdDev',
        'nEvents' ('standard' bins also carry 'lowerEdge'/'upperEdge')
    """
    average, stdDev = getAverageStdDev(numList = numList)
    underflow = []
    overflow = []
    histEvents = []
    histogram = []
    # Split the events into in-range, overflow and underflow populations
    for value in numList:
        if math.fabs(average - value) <= limit * stdDev:
            # Then we counted this event
            histEvents.append(value)
        elif average < value:
            overflow.append(value)
        elif average > value:
            underflow.append(value)
    if len(underflow) > 0:
        binAvg, binStdDev = getAverageStdDev(numList=underflow)
        histogram.append({'type': 'underflow',
                          'average': binAvg,
                          'stdDev': binStdDev,
                          'nEvents': len(underflow)})
    if len(overflow) > 0:
        binAvg, binStdDev = getAverageStdDev(numList=overflow)
        histogram.append({'type': 'overflow',
                          'average': binAvg,
                          'stdDev': binStdDev,
                          'nEvents': len(overflow)})
    if len(histEvents) < 1:
        # Nothing to do?
        return histogram
    histEvents.sort()
    upperBound = max(histEvents)
    lowerBound = min(histEvents)
    if lowerBound == upperBound:
        # This is a problem: a single distinct value; widen the range by 1
        # on each side and use a single bin
        logging.debug("Only one value in the histogram!")
        nBins = 1
        upperBound = upperBound + 1
        lowerBound = lowerBound - 1
    binSize = (upperBound - lowerBound)/nBins
    # NOTE(review): floorTruncate can reduce binSize to 0.0 for very narrow
    # ranges, leaving degenerate bins — confirm whether inputs can be that
    # close together.
    binSize = floorTruncate(binSize)
    for x in range(nBins):
        lowerEdge = floorTruncate(lowerBound + (x * binSize))
        histogram.append({'type': 'standard',
                          'lowerEdge': lowerEdge,
                          'upperEdge': lowerEdge + binSize,
                          'average': 0.0,
                          'stdDev': 0.0,
                          'nEvents': 0})
    # Fill per-bin statistics; histEvents is sorted, so each inner scan can
    # stop as soon as a value beyond the bin's upper edge is seen
    for bin_ in histogram:
        if bin_['type'] != 'standard':
            continue
        binList = []
        for value in histEvents:
            if value >= bin_['lowerEdge'] and value <= bin_['upperEdge']:
                # Then we're in the bin
                binList.append(value)
            elif value > bin_['upperEdge']:
                # Because this is a sorted list we are now out of the bin range
                # Calculate our values and break
                break
            else:
                continue
        # If we get here, it's because we're out of values in the bin
        # Time to do some math
        if len(binList) < 1:
            # Nothing to do here, leave defaults
            continue
        binAvg, binStdDev = getAverageStdDev(numList=binList)
        bin_['average'] = binAvg
        bin_['stdDev'] = binStdDev
        bin_['nEvents'] = len(binList)
    return histogram
def floorTruncate(value, precision=3):
    """
    _floorTruncate_

    Truncate `value` downwards to `precision` decimal places.

    Always truncates to a LOWER value, so histogram bin lower edges computed
    with it sit at or below the histogram lower edge.
    """
    scale = math.pow(10, precision)
    return math.floor(value * scale) / scale
def sortDictionaryListByKey(dictList, key, reverse=False):
    """
    _sortDictionaryListByKey_

    Given a list of dictionaries and a key with a numerical
    value, sort that dictionary in order of that key's value.

    NOTE: If the key does not exist, this will not raise an exception;
    entries missing the key sort as 0.0. This is because this is used for
    sorting of performance histograms and not all histograms have the
    same value.
    """
    def numericValue(entry):
        return float(entry.get(key, 0.0))
    return sorted(dictList, key=numericValue, reverse=reverse)
def getLargestValues(dictList, key, n=1):
    """
    _getLargestValues_

    Take a list of dictionaries, sort them by the (numeric) value of
    `key`, and return the n largest entries. Entries missing the key
    count as 0.0.
    """
    ordered = sorted(dictList, key=lambda entry: float(entry.get(key, 0.0)),
                     reverse=True)
    return ordered[:n]
def validateNumericInput(value):
    """
    _validateNumericInput_

    Check that the value is actually a usable, finite number.

    :param value: anything convertible to float
    :returns: True when value converts to a finite float, False otherwise
    """
    # FIX: the original converted with float() OUTSIDE the try block, so a
    # non-numeric input (e.g. the string "abc") raised instead of returning
    # False, defeating the purpose of a validator.
    try:
        value = float(value)
        if math.isnan(value) or math.isinf(value):
            return False
    except (TypeError, ValueError):
        return False
    return True
def calculateRunningAverageAndQValue(newPoint, n, oldM, oldQ):
    """
    _calculateRunningAverageAndQValue_

    Update a running mean (M) and sum of squared deviations (Q) with a new
    data point, using the algorithm described in:
    Donald E. Knuth (1998). The Art of Computer Programming, volume 2:
    Seminumerical Algorithms, 3rd ed., p. 232. Boston: Addison-Wesley.

    The standard deviation after n points is sqrt(Q/n) (see
    calculateStdDevFromQ in this module); the running average is M.

    :raises MathAlgoException: when any required input is not a valid number
    """
    if not validateNumericInput(newPoint):
        raise MathAlgoException("Provided a non-valid newPoint")
    if not validateNumericInput(n):
        raise MathAlgoException("Provided a non-valid n")
    if n == 1:
        # first point: the mean is the point itself, no deviation yet
        return newPoint, 0.0
    if not validateNumericInput(oldM):
        raise MathAlgoException("Provided a non-valid oldM")
    if not validateNumericInput(oldQ):
        raise MathAlgoException("Provided a non-valid oldQ")
    delta = newPoint - oldM
    M = oldM + delta / n
    Q = oldQ + ((n - 1) * delta * delta / n)
    return M, Q
def calculateStdDevFromQ(Q, n):
    """
    _calculateStdDevFromQ_

    Given Q, the sum of squared differences of points from their average,
    return the standard deviation sigma = sqrt(Q/n).

    :raises MathAlgoException: when Q or n is not a valid number
    :returns: sigma, or 0.0 when the computed result is not a valid number
    """
    if not validateNumericInput(Q):
        raise MathAlgoException("Provided a non-valid Q")
    if not validateNumericInput(n):
        raise MathAlgoException("Provided a non-valid n")
    sigma = math.sqrt(Q / n)
    if not validateNumericInput(sigma):
        return 0.0
    return sigma
from builtins import str as newstr
import random, cherrypy
class RESTError(Exception):
"""Base class for REST errors.
.. attribute:: http_code
Integer, HTTP status code for this error. Also emitted as X-Error-HTTP
header value.
.. attribute:: app_code
Integer, application error code, to be emitted as X-REST-Status header.
.. attribute:: message
String, information about the error, to be emitted as X-Error-Detail
header. Should not contain anything sensitive, and in particular should
never include any unvalidated or unsafe data, e.g. input parameters or
data from a database. Normally a fixed label with one-to-one match with
the :obj:`app-code`. If the text exceeds 200 characters, it's truncated.
Since this is emitted as a HTTP header, it cannot contain newlines or
anything encoding-dependent.
.. attribute:: info
String, additional information beyond :obj:`message`, to be emitted as
X-Error-Info header. Like :obj:`message` should not contain anything
sensitive or unsafe, or text inappropriate for a HTTP response header,
and should be short enough to fit in 200 characters. This is normally
free form text to clarify why the error happened.
.. attribute:: errid
String, random unique identifier for this error, to be emitted as
X-Error-ID header and output into server logs when logging the error.
The purpose is that clients save this id when they receive an error,
and further error reporting or debugging can use this value to identify
the specific error, and for example to grep logs for more information.
.. attribute:: errobj
If the problem was caused by another exception being raised in the code,
reference to the original exception object. For example if the code dies
with an :class:`KeyError`, this is the original exception object. This
error is logged to the server logs when reporting the error, but no
information about it is returned to the HTTP client.
.. attribute:: trace
The origin of the exception as returned by :func:`format_exc`. The full
trace is emitted to the server logs, each line prefixed with timestamp.
This information is not returned to the HTTP client.
"""
http_code = None
app_code = None
message = None
info = None
errid = None
errobj = None
trace = None
def __init__(self, info = None, errobj = None, trace = None):
self.errid = "%032x" % random.randrange(1 << 128)
self.errobj = errobj
self.info = info
self.trace = trace
    def __str__(self):
        """Return a one-line, log-friendly rendering of the error.

        Newlines inside the message/info/original error are collapsed to
        " ~~ " so the result always stays on a single log line."""
        return "%s %s [HTTP %d, APP %d, MSG %s, INFO %s, ERR %s]" \
            % (self.__class__.__name__, self.errid, self.http_code, self.app_code,
               repr(self.message).replace("\n", " ~~ "),
               repr(self.info).replace("\n", " ~~ "),
               repr(self.errobj).replace("\n", " ~~ "))
class NotAcceptable(RESTError):
    """Client did not specify a format it accepts, or no compatible format
    was found (HTTP 406, application error code 201)."""
    http_code = 406
    app_code = 201
    message = "Not acceptable"
class UnsupportedMethod(RESTError):
    """Client used an HTTP request method which isn't supported for any API
    call (HTTP 405, application error code 202)."""
    http_code = 405
    app_code = 202
    message = "Request method not supported"
class MethodWithoutQueryString(RESTError):
    """Client provided a query string which isn't acceptable for this
    request method (HTTP 405, application error code 203)."""
    http_code = 405
    app_code = 203
    message = "Query arguments not supported for this request method"
class APIMethodMismatch(RESTError):
    """Both the API and HTTP request methods are supported, but not in that
    combination (HTTP 405, application error code 204)."""
    http_code = 405
    app_code = 204
    message = "API not supported for this request method"
class APINotSpecified(RESTError):
    """The request URL is missing the API argument (HTTP 400, application
    error code 205)."""
    http_code = 400
    app_code = 205
    message = "API not specified"
class NoSuchInstance(RESTError):
    """The request URL is missing the instance argument, or the specified
    instance does not exist (HTTP 404, application error code 206)."""
    http_code = 404
    app_code = 206
    message = "No such instance"
class APINotSupported(RESTError):
    """The request URL provides a wrong API argument (HTTP 404, application
    error code 207)."""
    http_code = 404
    app_code = 207
    message = "API not supported"
class DataCacheEmpty(RESTError):
    """The wmstats data cache has not been created yet (HTTP 503,
    application error code 208)."""
    http_code = 503
    app_code = 208
    message = "DataCache is Empty"
class DatabaseError(RESTError):
    """Parent class for database-related errors.

    .. attribute:: lastsql

       A tuple of *(sql, binds, kwbinds),* where `sql` is the last SQL statement
       executed and `binds`, `kwbinds` are the bind values used with it. Any
       sensitive parts like passwords have already been censored from the `sql`
       string. Note that for massive requests `binds` or `kwbinds` can get large.
       These are logged out in the server logs when reporting the error, but no
       information about these are returned to the HTTP client.

    .. attribute:: instance

       String, the database instance for which the error occurred. This is
       reported in the error message output to server logs, but no information
       about this is returned to the HTTP client."""
    lastsql = None
    instance = None

    def __init__(self, info = None, errobj = None, trace = None,
                 lastsql = None, instance = None):
        """Record database error context on top of :class:`RESTError`.

        :arg lastsql: *(sql, binds, kwbinds)* tuple of the last statement.
        :arg instance: name of the database instance involved."""
        RESTError.__init__(self, info, errobj, trace)
        self.lastsql = lastsql
        self.instance = instance
class DatabaseUnavailable(DatabaseError):
    """The instance argument is correct, but cannot connect to the database
    (HTTP 503, application error code 401).

    This error will only occur at the initial attempt to connect to the
    database; :class:`~.DatabaseConnectionError` is raised instead if the
    connection ends prematurely after the transaction has already begun
    successfully."""
    http_code = 503
    app_code = 401
    message = "Database unavailable"
class DatabaseConnectionError(DatabaseError):
    """Database was available when the operation started, but the connection
    was lost or otherwise failed during the application operation (HTTP 504,
    application error code 402)."""
    http_code = 504
    app_code = 402
    message = "Database connection failure"
class DatabaseExecutionError(DatabaseError):
    """Database operation failed (HTTP 500, application error code 403)."""
    http_code = 500
    app_code = 403
    message = "Execution error"
class MissingParameter(RESTError):
    """Client did not supply a parameter which is required (HTTP 400,
    application error code 301)."""
    http_code = 400
    app_code = 301
    message = "Missing required parameter"
class InvalidParameter(RESTError):
    """Client supplied an invalid value for a parameter (HTTP 400,
    application error code 302)."""
    http_code = 400
    app_code = 302
    message = "Invalid input parameter"
class MissingObject(RESTError):
    """An object required for the operation is missing (HTTP 400,
    application error code 303). This might be a pre-requisite needed to
    create a reference, or an attempt to delete an object which does not
    exist."""
    http_code = 400
    app_code = 303
    message = "Required object is missing"
class TooManyObjects(RESTError):
    """Too many objects matched the specified criteria (HTTP 400,
    application error code 304). Usually this means more than one object
    was matched, deleted, or inserted, when only exactly one should have
    been subject to the operation."""
    http_code = 400
    app_code = 304
    message = "Too many objects"
class ObjectAlreadyExists(RESTError):
    """An already existing object is in the way of the operation (HTTP 400,
    application error code 305). This is usually caused by uniqueness
    constraint violations when creating new objects."""
    http_code = 400
    app_code = 305
    message = "Object already exists"
class InvalidObject(RESTError):
    """The specified object is invalid (HTTP 400, application error
    code 306)."""
    http_code = 400
    app_code = 306
    message = "Invalid object"
class ExecutionError(RESTError):
    """Input was in principle correct but there was an error processing
    the request (HTTP 500). This normally means either a programming error,
    timeout, or an unusual and unexpected problem with the database. For
    security reasons little additional information is returned. If the
    problem persists, the client should contact service operators. The
    returned error id can be used as a reference."""
    http_code = 500
    # NOTE(review): app_code 403 collides with DatabaseExecutionError's
    # app_code, so clients cannot tell the two apart from X-REST-Status
    # alone. Left unchanged because clients may depend on this value.
    app_code = 403
    message = "Execution error"
def report_error_header(header, val):
    """If `val` is non-empty, set CherryPy response `header` to `val`.

    Replaces all newlines with "; " characters. If the resulting value is
    longer than 200 characters, truncates it to the first 197 characters
    and leaves a trailing ellipsis "..."."""
    if not val:
        return
    sanitized = val.replace("\n", "; ")
    if len(sanitized) > 200:
        sanitized = sanitized[:197] + "..."
    cherrypy.response.headers[header] = sanitized
def report_rest_error(err, trace, throw):
    """Report a REST error: generate an appropriate log message, set the
    response headers and raise an appropriate :class:`~.HTTPError`.

    Normally `throw` would be True to translate the exception `err` into
    a HTTP server error, but the function can also be called with `throw`
    set to False if the purpose is merely to log an exception message.

    :arg err: exception object.
    :arg trace: stack trace to use in case `err` doesn't have one.
    :arg throw: raise a :class:`~.HTTPError` if True."""
    if isinstance(err, DatabaseError) and err.errobj:
        offset = None
        sql, binds, kwbinds = err.lastsql
        if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
            # Mark the error position inside the SQL statement with "<**>".
            offset = err.errobj.args[0].offset
            sql = sql[:offset] + "<**>" + sql[offset:]
        cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
                     " last statement: %s; binds: %s, %s; offset: %s"
                     % (err.http_code, err.app_code, err.message,
                        getattr(err.errobj, "__module__", "__builtins__"),
                        err.errobj.__class__.__name__,
                        err.errid, err.instance, newstr(err.errobj).rstrip(),
                        sql, binds, kwbinds, offset))
        for line in err.trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, RESTError):
        if err.errobj:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message,
                            getattr(err.errobj, "__module__", "__builtins__"),
                            err.errobj.__class__.__name__,
                            newstr(err.errobj).rstrip()))
            # Prefer the trace captured at the original failure site.
            trace = err.trace
        else:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    elif isinstance(err, cherrypy.HTTPError):
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
                     % (err.__module__, err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(200)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", err._message)
        if throw: raise err
    else:
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
                     % (getattr(err, "__module__", "__builtins__"),
                        err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
        # Bug fix: header values were set as bare ints here, inconsistent
        # with every other branch which uses newstr(); HTTP header values
        # must be strings.
        cherrypy.response.headers["X-REST-Status"] = newstr(400)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(500)
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", "Server error")
        if throw: raise cherrypy.HTTPError(500, "Server error")
from builtins import str as newstr, bytes as newbytes
from WMCore.REST.Error import *
import math
import re
import numbers
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3, PY2
def return_message(main_err, custom_err):
    """Return `custom_err` when it is non-empty, otherwise `main_err`."""
    return custom_err or main_err
def _arglist(argname, kwargs):
val = kwargs.get(argname, None)
if val == None:
return []
elif not isinstance(val, list):
return [ val ]
else:
return val
def _check_rx(argname, val, custom_err = None):
if not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
return re.compile(val)
except:
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
def _check_str(argname, val, rx, custom_err = None):
    """Validate that `val` is a string matching the regexp `rx`.

    2021-09: we are now on cherrypy 17.4.0, so converting to bytes here is
    no longer needed; the functionality of _check_str and _check_ustr was
    merged into this single function.

    :type val: str or bytes (only utf8-encoded strings) in py3, unicode or
      str in py2; normalised below to a "native str".
    :type rx: regex, compiled from a native str (unicode in py3, bytes in py2).
    :raises InvalidParameter: when the value is not a string or does not
      match `rx`."""
    native = decodeBytesToUnicodeConditional(val, condition=PY3)
    native = encodeUnicodeToBytesConditional(native, condition=PY2)
    # `native` is now a "native str": default `str` in both py2 and py3.
    if isinstance(native, str) and rx.match(native):
        return native
    raise InvalidParameter(return_message("Incorrect '%s' parameter %s %s" % (argname, type(native), native), custom_err))
def _check_num(argname, val, bare, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Integral) and (not isinstance(val, (newstr, newbytes)) or (bare and not val.isdigit())):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = int(val)
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _check_real(argname, val, special, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Number) and not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = float(val)
if not special and (math.isnan(n) or math.isinf(n)):
raise InvalidParameter(return_message("Parameter '%s' improper value" % argname, custom_err))
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _validate_one(argname, param, safe, checker, optional, *args):
val = param.kwargs.get(argname, None)
if optional and val == None:
safe.kwargs[argname] = None
else:
safe.kwargs[argname] = checker(argname, val, *args)
del param.kwargs[argname]
def _validate_all(argname, param, safe, checker, *args):
    """Validate every value of the multi-valued argument `argname` with
    `checker`, storing the resulting list into `safe.kwargs` and removing
    the raw values from `param.kwargs`. Missing arguments become an empty
    list."""
    values = _arglist(argname, param.kwargs)
    safe.kwargs[argname] = [checker(argname, item, *args) for item in values]
    param.kwargs.pop(argname, None)
def validate_rx(argname, param, safe, optional = False, custom_err = None):
    """Validates that an argument is a valid regexp.

    Checks that an argument named `argname` exists in `param.kwargs`,
    and is a string which compiles into a python regular expression.
    If successful, the regexp object (not the string) is copied into
    `safe.kwargs` and the string value is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    _validate_one(argname, param, safe, _check_rx, optional, custom_err)
def validate_str(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp.

    Checks that an argument named `argname` exists in `param.kwargs`
    and is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.

    Accepts both unicode strings and utf8-encoded bytes strings as the
    argument string.

    Accepts regexps compiled only from "native strings", which means str in
    both py2 and py3 (unicode in py3, bytes of utf8-encoded strings in py2).

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_ustr(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp.

    During the py2->py3 modernization, _check_str and _check_ustr have been
    merged into a single function called _check_str. This function is now
    identical to validate_str, but is kept nonetheless so as not to break
    our clients' code.

    Checks that an argument named `argname` exists in `param.kwargs`
    and is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_num(argname, param, safe, optional = False,
                 bare = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid integer number.

    Checks that an argument named `argname` exists in `param.kwargs`,
    and is an int or a string convertible to a valid number. If successful
    the integer value (not the string) is copied into `safe.kwargs`
    and the original int/string value is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.

    If `bare` is True, the number is required to be a pure digit sequence
    if it is a string. Otherwise anything accepted by `int(val)` is
    accepted, including for example leading white space or sign. Note that
    either way arbitrarily large values are accepted; if you want to
    prevent abuse against big integers, use the `minval` and `maxval`
    thresholds described below, or check the length of the string against
    some limit first.

    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    _validate_one(argname, param, safe, _check_num, optional, bare, minval, maxval, custom_err)
def validate_real(argname, param, safe, optional = False,
                  special = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid real number.

    Checks that an argument named `argname` exists in `param.kwargs`,
    and is a float number or a string convertible to a valid number. If
    successful the float value (not the string) is copied into `safe.kwargs`
    and the original float/string value is removed from `param.kwargs`.

    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.

    Anything accepted by `float(val)` is accepted, including for example
    leading white space, sign and exponent. However NaN and +/- Inf are
    rejected unless `special` is True.

    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    _validate_one(argname, param, safe, _check_real, optional, special, minval, maxval, custom_err)
def validate_rxlist(argname, param, safe, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    can be compiled into a python regexp object.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which compiles into a regular expression.
    If successful the array is copied into `safe.kwargs` and the value is
    removed from `param.kwargs`. The value always becomes an array in
    `safe.kwargs`, even if no or only one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    _validate_all(argname, param, safe, _check_rx, custom_err)
def validate_strlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.

    Use `validate_ustrlist` instead if the argument string might need
    to be converted from utf-8 into unicode first. Use this method only
    for inputs which are meant to be bare strings.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    _validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_ustrlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp once converted from utf-8 into unicode.

    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.

    Use `validate_strlist` instead if the argument strings should always
    be bare strings. This one automatically converts everything into
    unicode and expects input exclusively in utf-8, which may not be
    appropriate constraints for some uses.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # Bug fix: this called `_check_ustr`, which no longer exists -- it was
    # merged into `_check_str` during the py2->py3 modernization (see
    # validate_ustr) -- so any call here raised NameError at runtime.
    _validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_numlist(argname, param, safe, bare=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of integers, as checked by
    `validate_num()`.

    Checks that an argument named `argname` is either a single string/int or
    an array of strings/ints, each of which validates with `validate_num` and
    the `bare`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    _validate_all(argname, param, safe, _check_num, bare, minval, maxval, custom_err)
def validate_reallist(argname, param, safe, special=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of real numbers, as checked by
    `validate_real()`.

    Checks that an argument named `argname` is either a single string/float or
    an array of strings/floats, each of which validates with `validate_real` and
    the `special`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.

    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    _validate_all(argname, param, safe, _check_real, special, minval, maxval, custom_err)
def validate_no_more_input(param):
    """Verify that no unvalidated input remains in `param.args` or
    `param.kwargs`."""
    leftovers = (("path", "args", param.args),
                 ("keyword", "kwargs", param.kwargs))
    for kind, label, value in leftovers:
        if value:
            raise InvalidParameter("Excess %s arguments, not validated %s='%s'"
                                   % (kind, label, value))
def validate_lengths(safe, *names):
    """Verify that all `names` exist in `safe.kwargs`, are lists, and all
    the lists have the same length. This is a convenience function for
    checking that an API accepting multiple values receives an equal
    number of values for all of its parameters."""
    def _aslist(name):
        # Every named parameter must be present and already validated
        # into a list by one of the validate_*list() helpers.
        if name not in safe.kwargs or not isinstance(safe.kwargs[name], list):
            raise InvalidParameter("Incorrect '%s' parameter" % name)
        return safe.kwargs[name]

    refname = names[0]
    reflen = len(_aslist(refname))
    for other in names[1:]:
        otherlen = len(_aslist(other))
        if otherlen != reflen:
            raise InvalidParameter("Mismatched number of arguments: %d %s vs. %d %s"
                                   % (reflen, refname, otherlen, other))
from __future__ import print_function
import gzip
from builtins import str, bytes, object
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytes, encodeUnicodeToBytesConditional
from future.utils import viewitems
import hashlib
import json
import xml.sax.saxutils
import zlib
from traceback import format_exc
import cherrypy
from WMCore.REST.Error import RESTError, ExecutionError, report_rest_error
try:
from cherrypy.lib import httputil
except ImportError:
from cherrypy.lib import http as httputil
def vary_by(header):
    """Append `header` to the response's 'Vary' header, if not yet listed."""
    current = cherrypy.response.headers.get('Vary', '')
    entries = [item.strip() for item in current.split(",") if item.strip()]
    if header not in entries:
        entries.append(header)
    cherrypy.response.headers['Vary'] = ", ".join(entries)
def is_iterable(obj):
    """Return True when `obj` supports iteration, False otherwise."""
    try:
        iter(obj)
        return True
    except TypeError:
        return False
class RESTFormat(object):
    """Base class for HTTP response formatters.

    Derived classes supply ``stream_chunked()`` (and optionally override
    ``chunk_args()``) to serialise an iterable of objects into the output
    representation."""
    def __call__(self, stream, etag):
        """Main entry point for generating output for `stream` using `etag`
        object to generate ETag header. Returns a generator function for
        producing a verbatim copy of `stream` item, including any preambles
        and trailers needed for the selected format. The intention is that
        the caller will use the iterable to generate chunked HTTP transfer
        encoding, or a simple result such as an image."""
        # Make 'stream' iterable. We convert everything to chunks here.
        # The final stream consumer will collapse small responses back
        # to a single string. Convert files to 512 kB chunks (512*1024).
        if stream is None:
            stream = ['']
        elif isinstance(stream, (str, bytes)):
            stream = [stream]
        elif hasattr(stream, "read"):
            # types.FileType is not available anymore in python3,
            # using it raises pylint W1624.
            # Since cherrypy.lib.file_generator only uses the .read() attribute
            # of a file, we simply check if stream.read() is present instead.
            # https://github.com/cherrypy/cherrypy/blob/2a8aaccd649eb1011382c39f5cd93f76f980c0b1/cherrypy/lib/__init__.py#L64
            stream = cherrypy.lib.file_generator(stream, 512 * 1024)
        return self.stream_chunked(stream, etag, *self.chunk_args(stream))

    def chunk_args(self, stream):
        """Return extra arguments needed for `stream_chunked()`. The default
        return an empty tuple, so no extra arguments. Override in the derived
        class if `stream_chunked()` needs preamble or trailer arguments."""
        return tuple()
class XMLFormat(RESTFormat):
    """Format an iterable of objects into XML encoded in UTF-8.

    Generates normally first a preamble, a stream of XML-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.

    Outputs first a preamble, then XML encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.

    The ETag generation is deterministic only if iterating over input is
    deterministic. Beware in particular the key order for a dict is
    arbitrary and may differ for two semantically identical dicts.

    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.

    The output is generated as an XML document whose top-level entity name
    is defined by the label given at the formatter construction time. The
    caller must define ``cherrypy.request.rest_generate_data`` to element
    name for wrapping stream contents. Usually the top-level entity is the
    application name and the ``cherrypy.request.rest_generate_data`` is
    ``result``.

    Iterables are output as ``<array><i>ITEM</i><i>ITEM</i></array>``,
    dictionaries as ``<dict><key>KEY</key><value>VALUE</value></dict>``.
    `None` is output as empty contents, and hence there is no way to
    distinguish `None` and an empty string from each other. Scalar types
    are output as rendered by `str()`, but obviously XML encoding unsafe
    characters. This class does not support formatting arbitrary types.

    The formatter does not insert any spaces into the output. Although the
    output is generated as a preamble, stream of objects, and trailer just
    like by the `JSONFormatter`, each of which is a separate HTTP transfer
    chunk, the output does *not* have guaranteed line-oriented structure
    like the `JSONFormatter` produces. Note in particular that if the data
    stream contains strings with newlines, the output will have arbitrary
    line structure. On the other hand, as the output is well-formed XML,
    virtually all SAX processors can read the stream incrementally even if
    the client isn't able to fully preserve chunked HTTP transfer encoding."""
    def __init__(self, label):
        # `label` is the name of the top-level XML entity, normally the
        # application name.
        self.label = label

    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into XML."""
        if isinstance(obj, type(None)):
            result = ""
        elif isinstance(obj, str):
            # NOTE(review): on py3 .encode() returns bytes; when that value
            # is later %-formatted into a str (dict/array branches below)
            # it renders as "b'...'" -- confirm py3 output is as intended.
            result = xml.sax.saxutils.escape(obj).encode("utf-8")
        elif isinstance(obj, bytes):
            # NOTE(review): escape() replaces with str arguments; on py3
            # passing bytes here presumably raises TypeError -- verify.
            result = xml.sax.saxutils.escape(obj)
        elif isinstance(obj, (int, float, bool)):
            result = xml.sax.saxutils.escape(str(obj)).encode("utf-8")
        elif isinstance(obj, dict):
            result = "<dict>"
            for k, v in viewitems(obj):
                result += "<key>%s</key><value>%s</value>" % \
                    (xml.sax.saxutils.escape(k).encode("utf-8"),
                     XMLFormat.format_obj(v))
            result += "</dict>"
        elif is_iterable(obj):
            result = "<array>"
            for v in obj:
                result += "<i>%s</i>" % XMLFormat.format_obj(v)
            result += "</array>"
        else:
            # Unsupported type: log the details server side, return an
            # opaque execution error to the client.
            cherrypy.log("cannot represent object of type %s in xml (%s)"
                         % (type(obj).__class__.__name__, repr(obj)))
            raise ExecutionError("cannot represent object in xml")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = XMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # Consumer stopped reading: output is incomplete, so the
                # ETag must not be emitted and the trailer is suppressed.
                etag.invalidate()
                trailer = None
                raise
            finally:
                # Emit the trailer even on error so the XML stays
                # well-formed (clients check X-REST-Status for success).
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as XML reply."""
        preamble = "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
        preamble += "<%s>" % self.label
        if cherrypy.request.rest_generate_preamble:
            desc = self.format_obj(cherrypy.request.rest_generate_preamble)
            preamble += "<desc>%s</desc>" % desc
        preamble += "<%s>" % cherrypy.request.rest_generate_data
        trailer = "</%s></%s>" % (cherrypy.request.rest_generate_data, self.label)
        return preamble, trailer
class JSONFormat(RESTFormat):
"""Format an iterable of objects into JSON.
Generates normally first a preamble, a stream of JSON-rendered objects,
then the trailer, computing an ETag on the output string in the process.
This is designed exclusively for use with iterables for chunked transfer
encoding HTTP responses; it's not a general purpose formatting utility.
Outputs first a preamble, then JSON encoded output of input stream, and
finally a trailer. Any exceptions raised by input stream are reported to
`report_rest_error` and swallowed, as this is normally used to generate
output for CherryPy responses, which cannot handle exceptions reasonably
after the output generation begins; later processing may reconvert those
back to exceptions however (cf. stream_maybe_etag()). Once the preamble
has been emitted, the trailer is also emitted even if the input stream
raises an exception, in order to make the output well-formed; the client
must inspect the X-REST-Status trailer header to find out if it got the
complete output. No ETag header is generated in case of an exception.
The ETag generation is deterministic only if `cjson.encode()` output is
deterministic for the input. Beware in particular the key order for a
dict is arbitrary and may differ for two semantically identical dicts.
A X-REST-Status trailer header is added only in case of error. There is
normally 'X-REST-Status: 100' in normal response headers, and it remains
valid in case of success.
The output is always generated as a JSON dictionary. The caller must
define ``cherrypy.request.rest_generate_data`` as the key for actual
contents, usually something like "result". The `stream` value will be
generated as an array value for that key.
If ``cherrypy.request.rest_generate_preamble`` is a non-empty list, it
is output as the ``desc`` key value in the preamble before outputting
the `stream` contents. Otherwise the output consists solely of `stream`.
A common use of ``rest_generate_preamble`` is list of column labels
with `stream` an iterable of lists of column values.
The output is guaranteed to contain one line of preamble which starts a
dictionary and an array ("``{key: [``"), one line of JSON rendering of
each object in `stream`, with the first line starting with exactly one
space and second and subsequent lines starting with a comma, and one
final trailer line consisting of "``]}``". Each line is generated as a
HTTP transfer chunk. This format is fixed so readers can be constructed
to read and parse the stream incrementally one line at a time,
facilitating maximum throughput processing of the response."""
def stream_chunked(self, stream, etag, preamble, trailer):
    """Generator for actually producing the output.

    Yields `preamble`, then one JSON-rendered line per object in `stream`
    (first line prefixed with a single space, subsequent lines with a
    comma), and finally `trailer` -- feeding every emitted chunk into
    `etag` so the digest tracks exactly what was sent.

    :arg stream: iterable of objects serialised with ``json.dumps``.
    :arg etag: digest object with ``update()`` / ``invalidate()``.
    :arg preamble: opening chunk (may be empty/None to skip).
    :arg trailer: closing chunk; suppressed when the client disconnects
        early (GeneratorExit) since the output is knowingly incomplete."""
    comma = " "
    try:
        if preamble:
            etag.update(preamble)
            yield preamble
        # keep a reference to the last object so the serialisation error
        # message below can report which object failed
        obj = None
        try:
            for obj in stream:
                chunk = comma + json.dumps(obj) + "\n"
                etag.update(chunk)
                yield chunk
                comma = ","
        except cherrypy.HTTPError:
            # deliberate HTTP errors propagate to the outer handler untouched
            raise
        except GeneratorExit:
            # client went away: invalidate the ETag and suppress the trailer
            etag.invalidate()
            trailer = None
            raise
        except Exception as exp:
            print("ERROR, json.dumps failed to serialize %s, type %s\nException: %s" \
                  % (obj, type(obj), str(exp)))
            raise
        finally:
            # emit the trailer even on error so the JSON stays well-formed;
            # the X-REST-Status trailer header then marks overall success
            if trailer:
                etag.update(trailer)
                yield trailer
                cherrypy.response.headers["X-REST-Status"] = 100
    except cherrypy.HTTPError:
        raise
    except RESTError as e:
        etag.invalidate()
        report_rest_error(e, format_exc(), False)
    except Exception as e:
        # any other failure is reported as a generic ExecutionError
        etag.invalidate()
        report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
    """Return header and trailer needed to wrap `stream` as JSON reply.

    The preamble opens a dictionary, optionally emits the ``desc`` key
    from ``cherrypy.request.rest_generate_preamble``, and opens the array
    under ``cherrypy.request.rest_generate_data``; the trailer closes both."""
    request = cherrypy.request
    parts = ["{"]
    separator = ""
    if request.rest_generate_preamble:
        parts.append('"desc": %s' % json.dumps(request.rest_generate_preamble))
        separator = ", "
    parts.append('%s"%s": [\n' % (separator, request.rest_generate_data))
    return "".join(parts), "]}\n"
class PrettyJSONFormat(JSONFormat):
    """Format used for human, (web browser).

    Same contract as the parent JSON format, but renders each object with
    ``json.dumps(..., indent=2)`` for readability."""
    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output.

        Yields `preamble`, one pretty-printed JSON chunk per object in
        `stream` (first prefixed with a space, the rest with a comma), and
        finally `trailer`; every chunk is fed into `etag`. On early client
        disconnect (GeneratorExit) the ETag is invalidated and the trailer
        suppressed; other errors are reported via report_rest_error()."""
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            try:
                for obj in stream:
                    chunk = comma + json.dumps(obj, indent=2)
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except GeneratorExit:
                # client disconnected: output is incomplete, drop the trailer
                etag.invalidate()
                trailer = None
                raise
            finally:
                # keep the JSON well-formed even if the stream failed
                if trailer:
                    etag.update(trailer)
                    yield trailer
                    cherrypy.response.headers["X-REST-Status"] = 100
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
class PrettyJSONHTMLFormat(PrettyJSONFormat):
    """Format used for human (web browser); wraps the content into HTML
    markup so dictionaries and iterables render as nested lists."""

    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into HTML.

        Scalars are emitted as text (multi-line strings wrapped in <pre>),
        dicts become <ul> key/value lists, other iterables become
        collapsible <details> lists. Raises ExecutionError for any other
        type."""
        if isinstance(obj, type(None)):
            result = ""
        elif isinstance(obj, str):
            obj = xml.sax.saxutils.quoteattr(obj)
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, bytes):
            obj = xml.sax.saxutils.quoteattr(str(obj, "utf-8"))
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, (int, float, bool)):
            result = "%s" % obj
        elif isinstance(obj, dict):
            result = "<ul>"
            for k, v in viewitems(obj):
                result += "<li><b>%s</b>: %s</li>" % (k, PrettyJSONHTMLFormat.format_obj(v))
            result += "</ul>"
        elif is_iterable(obj):
            empty = True
            result = "<details open><ul>"
            for v in obj:
                empty = False
                result += "<li>%s</li>" % PrettyJSONHTMLFormat.format_obj(v)
            result += "</ul></details>"
            if empty:
                result = ""
        else:
            # FIX: was 'type(obj).__class__.__name__', which always logged
            # the literal string 'type' instead of the object's class name
            cherrypy.log("cannot represent object of type %s in xml (%s)"
                         % (type(obj).__name__, repr(obj)))
            raise ExecutionError("cannot represent object in xml")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output.

        Yields `preamble`, one HTML chunk per object in `stream`, and
        finally `trailer`, feeding each chunk into `etag`. On client
        disconnect the ETag is invalidated and the trailer suppressed;
        other errors are reported via report_rest_error()."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = PrettyJSONHTMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # client disconnected: output incomplete, drop the trailer
                etag.invalidate()
                trailer = None
                raise
            finally:
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception:
            # 'as e' was unused here; the traceback is kept via format_exc()
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as an HTML reply.
        (Docstring fixed: this wraps HTML, not XML.)"""
        preamble = "<html><body>"
        trailer = "</body></html>"
        return preamble, trailer
class RawFormat(RESTFormat):
    """Format an iterable of objects as raw data.

    Generates raw data completely unmodified, for example image data or
    streaming arbitrary external data files including even plain text.
    Computes an ETag on the output in the process. The result is always
    chunked, even simple strings on input. Usually small enough responses
    will automatically be converted back to a single string response post
    compression and ETag processing.

    Any exceptions raised by input stream are reported to `report_rest_error`
    and swallowed, as this is normally used to generate output for CherryPy
    responses, which cannot handle exceptions reasonably after the output
    generation begins; later processing may reconvert those back to exceptions
    however (cf. stream_maybe_etag()). A X-REST-Status trailer header is added
    if (and only if) an exception occurs; the client must inspect that to find
    out if it got the complete output. There is normally 'X-REST-Status: 100'
    in normal response headers, and it remains valid in case of success.
    No ETag header is generated in case of an exception."""
    def stream_chunked(self, stream, etag):
        """Generator for actually producing the output.

        Passes each chunk of `stream` through unmodified while feeding it
        into `etag`. RESTError is reported as-is, other Exceptions as a
        generic ExecutionError; BaseException (e.g. GeneratorExit on
        client disconnect) only invalidates the ETag and propagates."""
        try:
            for chunk in stream:
                etag.update(chunk)
                yield chunk
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception:
            # 'as e' was unused; the original error is preserved via format_exc()
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
        except BaseException:
            etag.invalidate()
            raise
class DigestETag(object):
    """Incrementally compute a hash digest over response contents for the
    ETag header. Subclasses select the algorithm via the `algorithm`
    class attribute; invalidate() permanently disables the digest."""
    # default hashlib algorithm name, overridden by subclasses
    algorithm = None

    def __init__(self, algorithm=None):
        """Create the digest engine, preferring the explicit `algorithm`
        argument over the class-level default."""
        chosen = algorithm if algorithm else self.algorithm
        self.digest = hashlib.new(chosen)

    def update(self, val):
        """Feed response data `val` into the digest, unless invalidated."""
        if not self.digest:
            return
        self.digest.update(encodeUnicodeToBytes(val))

    def value(self):
        """Return the quoted ETag header value, or None once invalidated."""
        if self.digest:
            return '"%s"' % self.digest.hexdigest()
        return self.digest

    def invalidate(self):
        """Discard the digest so value() returns None from now on."""
        self.digest = None
class MD5ETag(DigestETag):
    """Compute MD5 hash over contents for ETag header."""
    # hashlib algorithm name consumed by DigestETag.__init__
    algorithm = 'md5'
class SHA1ETag(DigestETag):
    """Compute SHA1 hash over contents for ETag header."""
    # hashlib algorithm name consumed by DigestETag.__init__
    algorithm = 'sha1'
def _stream_compress_identity(reply, *args):
    """Streaming compressor which returns original data unchanged.

    :arg reply: iterable of response chunks, returned as-is.
    :arg args: ignored; present so the signature matches the other
        compressors registered in _stream_compressor."""
    return reply
def _stream_compress_deflate(reply, compress_level, max_chunk):
"""Streaming compressor for the 'deflate' method. Generates output that
is guaranteed to expand at the exact same chunk boundaries as original
reply stream."""
# Create zlib compression object, with raw data stream (negative window size)
z = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
# Data pending compression. We only take entire chunks from original
# reply. Then process reply one chunk at a time. Whenever we have enough
# data to compress, spit it out flushing the zlib engine entirely, so we
# respect original chunk boundaries.
npending = 0
pending = []
for chunk in reply:
pending.append(chunk)
npending += len(chunk)
if npending >= max_chunk:
part = z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FULL_FLUSH)
pending = []
npending = 0
yield part
# Crank the compressor one more time for remaining output.
if npending:
yield z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FINISH)
def _stream_compress_gzip(reply, compress_level, *args):
"""Streaming compressor for the 'gzip' method. Generates output that
is guaranteed to expand at the exact same chunk boundaries as original
reply stream."""
data = []
for chunk in reply:
data.append(chunk)
if data:
yield gzip.compress(encodeUnicodeToBytes("".join(data)), compress_level)
# : Stream compression methods.
# Maps Accept-Encoding tokens to the generator implementing that encoding;
# consulted by stream_compress() below.
_stream_compressor = {
    'identity': _stream_compress_identity,
    'deflate': _stream_compress_deflate,
    'gzip': _stream_compress_gzip
}
def stream_compress(reply, available, compress_level, max_chunk):
    """If compression has been requested via Accept-Encoding request header,
    and is granted for this response via `available` compression methods,
    convert the streaming `reply` into another streaming response which is
    compressed at the exact chunk boundaries of the original response,
    except that individual chunks may be coalesced up to `max_chunk` size.

    :arg reply: iterable of response chunks.
    :arg available: names of compression methods allowed for this response.
    :arg compress_level: how hard to compress; zero disables compression
        entirely. (Docstring fixed: the parameter is `compress_level`.)
    :arg max_chunk: coalesce original chunks up to this many bytes.
    :returns: `reply` unchanged, or a compressing generator wrapping it."""
    # NOTE: the original declared 'global _stream_compressor', which is
    # unnecessary -- the module-level table is only read here, never rebound.
    for enc in cherrypy.request.headers.elements('Accept-Encoding'):
        if enc.value not in available:
            continue
        elif enc.value in _stream_compressor and compress_level > 0:
            # Add 'Vary' header for 'Accept-Encoding'.
            vary_by('Accept-Encoding')
            # Compress contents at original chunk boundaries; the length
            # of the compressed stream is unknown, so drop Content-Length.
            if 'Content-Length' in cherrypy.response.headers:
                del cherrypy.response.headers['Content-Length']
            cherrypy.response.headers['Content-Encoding'] = enc.value
            return _stream_compressor[enc.value](reply, compress_level, max_chunk)
    return reply
def _etag_match(status, etagval, match, nomatch):
    """Match ETag value against any If-Match / If-None-Match headers.

    Only GET/HEAD requests with a 2xx status are checked; other methods
    must handle preconditions before streaming begins. Raises a 412 error
    when an If-Match precondition fails, or a 304 redirect when an
    If-None-Match condition matches."""
    if cherrypy.request.method not in ('GET', 'HEAD'):
        return
    code, _reason, _msg = httputil.valid_status(status)
    if not 200 <= code <= 299:
        return
    if match and ("*" in match or etagval in match):
        raise cherrypy.HTTPError(412, "Precondition on ETag %s failed" % etagval)
    if nomatch and ("*" in nomatch or etagval in nomatch):
        raise cherrypy.HTTPRedirect([], 304)
def _etag_tail(head, tail, etag):
"""Generator which first returns anything in `head`, then `tail`.
Sets ETag header at the end to value of `etag` if it's defined and
yields a value."""
for chunk in head:
yield encodeUnicodeToBytes(chunk)
for chunk in tail:
yield encodeUnicodeToBytes(chunk)
etagval = (etag and etag.value())
if etagval:
cherrypy.response.headers["ETag"] = etagval
def stream_maybe_etag(size_limit, etag, reply):
    """Maybe generate ETag header for the response, and handle If-Match
    and If-None-Match request headers. Consumes the reply until at most
    `size_limit` bytes. If the response fits into that size, adds the
    ETag header and matches it against any If-Match / If-None-Match
    request headers and replies appropriately.

    If the response is fully buffered, and the `reply` generator actually
    results in an error and sets X-Error-HTTP / X-Error-Detail headers,
    converts that error back into a real HTTP error response. Otherwise
    responds with the fully buffered body directly, without generator
    and chunking. In other words, responses smaller than `size_limit`
    are always fully buffered and replied immediately without chunking.
    If the response is not fully buffered, it's guaranteed to be output
    at original chunk boundaries.

    Note that if this function is fed the output from `stream_compress()`
    as it normally would be, the `size_limit` constrains the compressed
    size, and chunk boundaries correspond to compressed chunks."""
    req = cherrypy.request
    res = cherrypy.response
    match = [str(x) for x in (req.headers.elements('If-Match') or [])]
    nomatch = [str(x) for x in (req.headers.elements('If-None-Match') or [])]
    # If ETag is already set, match conditions and output without buffering.
    etagval = res.headers.get('ETag', None)
    if etagval:
        _etag_match(res.status or 200, etagval, match, nomatch)
        res.headers['Trailer'] = 'X-REST-Status'
        return _etag_tail([], reply, None)
    # Buffer up to size_limit bytes internally. This internally builds up the
    # ETag value inside 'etag'. In case of exceptions the ETag invalidates.
    # If we exceed the limit, fall back to streaming without checking ETag
    # against If-Match/If-None-Match. We'll still set the ETag in the trailer
    # headers, so clients which understand trailers will get the value; most
    # clients including browsers will ignore them.
    size = 0
    result = []
    for chunk in reply:
        result.append(chunk)
        size += len(chunk)
        if size > size_limit:
            res.headers['Trailer'] = 'X-REST-Status'
            return _etag_tail(result, reply, etag)
    # We've buffered the entire response, but it may be an error reply. The
    # generator code does not know if it's allowed to raise exceptions, so
    # it swallows all errors and converts them into X-* headers. We recover
    # the original HTTP response code and message from X-Error-{HTTP,Detail}
    # headers, if any are present.
    err = res.headers.get('X-Error-HTTP', None)
    if err:
        message = res.headers.get('X-Error-Detail', 'Original error lost')
        raise cherrypy.HTTPError(int(err), message)
    # OK, we buffered the entire reply and it's ok. Check ETag match criteria.
    # The original stream generator must guarantee that if it fails it resets
    # the 'etag' value, even if the error handlers above didn't run.
    etagval = etag.value()
    if etagval:
        res.headers['ETag'] = etagval
        _etag_match(res.status or 200, etagval, match, nomatch)
    # OK, respond with the buffered reply as a plain string.
    res.headers['Content-Length'] = size
    # Chunks may arrive as str in py2 or bytes in py3, hence the conditional
    # encoding. Join once instead of the original quadratic '+=' loop.
    joiner = b"" if PY3 else ""
    resp = joiner.join(encodeUnicodeToBytesConditional(item, condition=PY3)
                       for item in result)
    assert len(resp) == size
    return resp
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method='HNLogin',
                       login='testuser', name='Test User',
                       dn="/test/dn", roles=None, format="list"):
    """Create fake authentication and authorisation headers compatible
    with the CMSWEB front-ends. Assumes you have the HMAC signing key
    the back-end will use to validate the headers.

    :arg str hmac_key: binary key data for signing headers.
    :arg str method: authentication method, one of X509Cert, X509Proxy,
      HNLogin, HostIP, AUCookie or None.
    :arg str login: account login name.
    :arg str name: account user name.
    :arg str dn: account X509 subject.
    :arg dict roles: role dictionary, each role with 'site' and 'group'
      lists; None (the new default, replacing a mutable ``{}`` default)
      means no roles.
    :arg str format: 'list' to return (name, value) tuples, anything else
      to return the raw header dictionary.
    :returns: list of header name, value tuples to add to a HTTP request."""
    roles = roles or {}
    headers = {'cms-auth-status': 'OK', 'cms-authn-method': method}
    if login:
        headers['cms-authn-login'] = login
    if name:
        headers['cms-authn-name'] = name
    if dn:
        headers['cms-authn-dn'] = dn
    # Build one 'cms-authz-*' header per role. Use dedicated variables so
    # the 'name' argument is not clobbered (the original reused 'name' as
    # the loop variable).
    for role, spec in viewitems(roles):
        hname = 'cms-authz-' + authz_canonical(role)
        values = []
        for r in 'site', 'group':
            if r in spec:
                values.extend(["%s:%s" % (r, authz_canonical(v)) for v in spec[r]])
        headers[hname] = " ".join(values)
    # Sign all headers except the status one: lengths in the prefix,
    # concatenated names/values in the suffix, HMAC-SHA1 over both.
    prefix = suffix = ""
    hkeys = list(headers)
    for hk in sorted(hkeys):
        if hk != 'cms-auth-status':
            prefix += "h%xv%x" % (len(hk), len(headers[hk]))
            suffix += "%s%s" % (hk, headers[hk])
    msg = prefix + "#" + suffix
    if PY3:
        hmac_key = encodeUnicodeToBytes(hmac_key)
        msg = encodeUnicodeToBytes(msg)
    cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
    headers['cms-authn-hmac'] = cksum
    if format == "list":
        return listitems(headers)
    else:
        return headers
def fake_authz_key_file(delete=True):
    """Create temporary file for fake authorisation hmac signing key.

    :arg bool delete: remove the file automatically when it is closed.
    :returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
      attribute contains the HMAC signing binary key."""
    t = NamedTemporaryFile(delete=delete)
    # os.urandom is portable and avoids opening /dev/urandom by hand
    t.data = os.urandom(20)
    t.write(t.data)
    t.seek(0)
    return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
    """Helper function to set up a :class:`~.RESTMain` server from given
    module and class. Creates a fake server configuration and instantiates
    the server application from it.

    :arg str module_name: module from which to import test class.
    :arg str class_name: name of the server test class.
    :arg str app_name: optional test application name, 'test' by default.
    :arg authz_key_file: optional pre-made fake authz key file; a fresh one
      is created when not given.
    :arg int port: TCP port the CherryPy test server listens on.
    :returns: tuple with the server object and authz hmac signing key."""
    if authz_key_file:
        test_authz_key = authz_key_file
    else:
        test_authz_key = fake_authz_key_file()
    # Minimal fake configuration: one application with a single 'top' view
    # pointing at the class under test.
    cfg = Configuration()
    main = cfg.section_('main')
    main.application = app_name or 'test'
    main.silent = True
    main.index = 'top'
    main.authz_defaults = { 'role': None, 'group': None, 'site': None }
    main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
    app = cfg.section_(app_name or 'test')
    app.admin = 'dada@example.org'
    app.description = app.title = 'Test'
    views = cfg.section_('views')
    top = views.section_('top')
    top.object = module_name + "." + class_name
    server = RESTMain(cfg, os.getcwd())
    server.validate_config()
    server.setup_server()
    server.install_application()
    cherrypy.config.update({'server.socket_port': port})
    cherrypy.config.update({'server.socket_host': '127.0.0.1'})
    cherrypy.config.update({'request.show_tracebacks': True})
    cherrypy.config.update({'environment': 'test_suite'})
    # NOTE: 'app' is rebound here from the config section above to the
    # mounted CherryPy applications.
    for app in viewvalues(cherrypy.tree.apps):
        if '/' in app.config:
            app.config["/"]["request.show_tracebacks"] = True
    return server, test_authz_key
from __future__ import division, print_function, absolute_import
from future import standard_library
standard_library.install_aliases()
# system modules
import json
import logging
import math
import re
import time
from urllib.parse import quote, unquote
# WMCore modules
from Utils.IteratorTools import grouper
from Utils.CertTools import ckey, cert
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
# DBS agregators
from dbs.apis.dbsClient import aggRuns, aggFileLumis
# static variables
# Patterns matching StepChain/TaskChain workflow dictionary keys such as
# 'Step1' or 'Task2'.
STEP_PAT = re.compile(r'Step[0-9]')
TASK_PAT = re.compile(r'Task[0-9]')
def hasHTTPFailed(row):
    """
    Evaluates whether the HTTP request through PyCurl failed or not.

    :param row: dictionary data returned from pycurl_manager module
    :return: a boolean confirming failure or not
    """
    # A request succeeded only if it carries a 'data' payload and an HTTP
    # status code of 200; a missing code is treated as 200.
    return 'data' not in row or int(row.get('code', 200)) != 200
def getMSLogger(verbose, logger=None):
    """
    _getMSLogger_

    Return a logger object using the standard WMCore formatter

    :param verbose: boolean setting debug or not
    :param logger: optional pre-configured logger to reuse as-is
    :return: a logger object
    """
    if logger:
        return logger
    # use a dedicated name for the level instead of rebinding 'verbose'
    level = logging.DEBUG if verbose else logging.INFO
    rootLogger = logging.getLogger()
    logging.basicConfig(format="%(asctime)s:%(levelname)s:%(module)s: %(message)s",
                        level=level)
    return rootLogger
def isRelVal(reqDict):
    """
    Helper function to evaluate whether the workflow is RelVal or not.

    :param reqDict: dictionary with the workflow description
    :return: True if it's a RelVal workflow, otherwise False
    """
    relvalTypes = ('RelVal', 'HIRelVal')
    return reqDict.get("SubRequestType", "") in relvalTypes
def dbsInfo(datasets, dbsUrl):
    """Provides DBS info about dataset blocks.

    :param datasets: list of dataset names
    :param dbsUrl: string with the DBS URL
    :return: tuple of (blocks per dataset, total size per dataset,
        block-name-to-size map per dataset)
    """
    datasetBlocks = {}
    datasetSizes = {}
    datasetTransfers = {}
    if not datasets:
        return datasetBlocks, datasetSizes, datasetTransfers

    urls = ['%s/blocks?detail=True&dataset=%s' % (dbsUrl, d) for d in datasets]
    logging.info("Executing %d requests against DBS 'blocks' API, with details", len(urls))
    for row in multi_getdata(urls, ckey(), cert()):
        # dataset name is the value of the last URL parameter
        dataset = row['url'].split('=')[-1]
        if hasHTTPFailed(row):
            print("FAILURE: dbsInfo for %s. Error: %s %s" % (dataset, row.get('code'), row.get('error')))
            continue
        blockNames = []
        totalSize = 0
        transfers = datasetTransfers.setdefault(dataset, {})
        for item in json.loads(row['data']):
            blockNames.append(item['block_name'])
            totalSize += item['block_size']
            transfers[item['block_name']] = item['block_size']
        datasetBlocks[dataset] = blockNames
        datasetSizes[dataset] = totalSize
    return datasetBlocks, datasetSizes, datasetTransfers
def getPileupDocs(mspileupUrl, queryDict):
    """
    Fetch documents from MSPileup according to the query passed in.

    :param mspileupUrl: string with the MSPileup url
    :param queryDict: dictionary with the MongoDB query to run
    :return: returns a list with all the pileup objects, or raises
        an exception in case of failure
    """
    mgr = RequestHandler()
    headers = {'Content-Type': 'application/json'}
    data = mgr.getdata(mspileupUrl, queryDict, headers, verb='POST',
                       ckey=ckey(), cert=cert(), encode=True, decode=True)
    # MSPileup reports errors inside the first element of the 'result' list
    if data and data.get("result", []):
        if "error" in data["result"][0]:
            msg = f"Failed to retrieve MSPileup documents with query: {queryDict}"
            msg += f" and error message: {data}"
            raise RuntimeError(msg)
    return data["result"]
def getPileupDatasetSizes(datasets, phedexUrl):
    """
    Given a list of datasets, find all their blocks with replicas
    available, i.e., blocks that have valid files to be processed,
    and calculate the total dataset size

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :return: a dictionary of datasets and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    sizeByDset = {}
    if not datasets:
        return sizeByDset
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        dataset = row['url'].split('=')[-1]
        # FIX: use hasHTTPFailed (as dbsInfo does) instead of indexing
        # row['data'] directly, which raised KeyError when 'data' was absent
        if hasHTTPFailed(row):
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
                                                                                     row.get('code'),
                                                                                     row.get('error')))
            sizeByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        sizeByDset.setdefault(dataset, 0)
        try:
            for item in rows['phedex']['block']:
                sizeByDset[dataset] += item['bytes']
        except Exception as exc:
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
            sizeByDset[dataset] = None
    return sizeByDset
def getBlockReplicasAndSize(datasets, phedexUrl, group=None):
    """
    Given a list of datasets, find all their blocks with replicas
    available (thus blocks with at least 1 valid file), completed
    and subscribed.
    If PhEDEx group is provided, make sure it's subscribed under that
    same group.

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :param group: optional PhEDEx group name
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    dsetBlockSize = {}
    if not datasets:
        return dsetBlockSize
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        dataset = row['url'].split('=')[-1]
        # FIX: use hasHTTPFailed instead of indexing row['data'] directly,
        # which raised KeyError when the 'data' key was missing
        if hasHTTPFailed(row):
            print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s %s" % (dataset,
                                                                                       row.get('code'),
                                                                                       row.get('error')))
            dsetBlockSize.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        dsetBlockSize.setdefault(dataset, {})
        try:
            for item in rows['phedex']['block']:
                block = {item['name']: {'blockSize': item['bytes'], 'locations': []}}
                for repli in item['replica']:
                    # only complete and subscribed replicas count as locations
                    if repli['complete'] == 'y' and repli['subscribed'] == 'y':
                        if not group:
                            block[item['name']]['locations'].append(repli['node'])
                        elif repli['group'] == group:
                            block[item['name']]['locations'].append(repli['node'])
                dsetBlockSize[dataset].update(block)
        except Exception as exc:
            print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s" % (dataset, str(exc)))
            dsetBlockSize[dataset] = None
    return dsetBlockSize
def getPileupSubscriptions(datasets, phedexUrl, group=None, percentMin=99):
    """
    Provided a list of datasets, find dataset level subscriptions where it's
    as complete as `percentMin`.

    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :param group: optional string with the PhEDEx group
    :param percentMin: only return subscriptions that are this complete
    :return: a dictionary of datasets and a list of their location.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    locationByDset = {}
    if not datasets:
        return locationByDset
    if group:
        url = "%s/subscriptions?group=%s" % (phedexUrl, group)
        url += "&percent_min=%s&dataset=%s"
    else:
        url = "%s/subscriptions?" % phedexUrl
        url += "percent_min=%s&dataset=%s"
    urls = [url % (percentMin, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'subscriptions' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        dataset = row['url'].rsplit('=')[-1]
        # FIX: use hasHTTPFailed instead of indexing row['data'] directly,
        # which raised KeyError when the 'data' key was missing
        if hasHTTPFailed(row):
            print("Failure in getPileupSubscriptions for dataset %s. Error: %s %s" % (dataset,
                                                                                      row.get('code'),
                                                                                      row.get('error')))
            locationByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        locationByDset.setdefault(dataset, [])
        try:
            for item in rows['phedex']['dataset']:
                for subs in item['subscription']:
                    locationByDset[dataset].append(subs['node'])
        except Exception as exc:
            print("Failure in getPileupSubscriptions for dataset %s. Error: %s" % (dataset, str(exc)))
            locationByDset[dataset] = None
    return locationByDset
def getBlocksByDsetAndRun(datasetName, runList, dbsUrl):
    """
    Given a dataset name and a list of runs, find all the blocks

    :param datasetName: string with the dataset name
    :param runList: list (or set) of run numbers
    :param dbsUrl: string with the DBS URL
    :return: flat list of blocks
    """
    blocks = set()
    if isinstance(runList, set):
        runList = list(runList)
    urls = []
    # chunk the runs in groups of 50 to keep the URL length sane; DBS takes
    # the run_num parameter as a python-style list rendered without spaces
    for runSlice in grouper(runList, 50):
        urls.append('%s/blocks?run_num=%s&dataset=%s' % (dbsUrl, str(runSlice).replace(" ", ""), datasetName))
    logging.info("Executing %d requests against DBS 'blocks' API, with run_num list", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # dataset name is the value of the last URL parameter
        dataset = row['url'].rsplit('=')[-1]
        if hasHTTPFailed(row):
            msg = "Failure in getBlocksByDsetAndRun for %s. Error: %s %s" % (dataset,
                                                                             row.get('code'),
                                                                             row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        for item in rows:
            blocks.add(item['block_name'])
    return list(blocks)
def getFileLumisInBlock(blocks, dbsUrl, validFileOnly=1):
    """
    Given a list of blocks, find their file run lumi information
    in DBS for up to 10 blocks concurrently

    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    :param validFileOnly: integer flag for valid files only or not
    :return: a dict of blocks with list of file/run/lumi info
    """
    runLumisByBlock = {}
    urls = ['%s/filelumis?validFileOnly=%d&block_name=%s' % (dbsUrl, validFileOnly, quote(b)) for b in blocks]
    # limit it to 10 concurrent calls not to overload DBS
    logging.info("Executing %d requests against DBS 'filelumis' API, concurrency limited to 10", len(urls))
    data = multi_getdata(urls, ckey(), cert(), num_conn=10)
    for row in data:
        # block name is the URL-encoded value of the last URL parameter
        blockName = unquote(row['url'].rsplit('=')[-1])
        if hasHTTPFailed(row):
            msg = "Failure in getFileLumisInBlock for block %s. Error: %s %s" % (blockName,
                                                                                 row.get('code'),
                                                                                 row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        rows = aggFileLumis(rows)  # adjust to DBS Go server output
        runLumisByBlock.setdefault(blockName, [])
        for item in rows:
            runLumisByBlock[blockName].append(item)
    return runLumisByBlock
def findBlockParents(blocks, dbsUrl):
    """
    Helper function to find block parents given a list of block names.

    :param blocks: list of child block names
    :param dbsUrl: string with the DBS URL
    Return a dictionary in the format of:
    {"child dataset name": {"child block": ["parent blocks"],
                            "child block": ["parent blocks"], ...}}
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    parentsByBlock = {}
    urls = ['%s/blockparents?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
    logging.info("Executing %d requests against DBS 'blockparents' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # block name is the URL-encoded value of the last URL parameter;
        # the dataset name is everything before the '#'
        blockName = unquote(row['url'].rsplit('=')[-1])
        dataset = blockName.split("#")[0]
        if hasHTTPFailed(row):
            print("Failure in findBlockParents for block %s. Error: %s %s" % (blockName,
                                                                              row.get('code'),
                                                                              row.get('error')))
            parentsByBlock.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        try:
            if dataset in parentsByBlock and parentsByBlock[dataset] is None:
                # then one of the block calls has failed, keep it failed!
                continue
            parentsByBlock.setdefault(dataset, {})
            for item in rows:
                parentsByBlock[dataset].setdefault(item['this_block_name'], set())
                parentsByBlock[dataset][item['this_block_name']].add(item['parent_block_name'])
        except Exception as exc:
            print("Failure in findBlockParents for block %s. Error: %s" % (blockName, str(exc)))
            parentsByBlock[dataset] = None
    return parentsByBlock
def getRunsInBlock(blocks, dbsUrl):
    """
    Provided a list of block names, find their run numbers

    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    :return: a dictionary of block names and a list of run numbers
    """
    runsByBlock = {}
    urls = ['%s/runs?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
    logging.info("Executing %d requests against DBS 'runs' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # block name is the URL-encoded value of the last URL parameter
        blockName = unquote(row['url'].rsplit('=')[-1])
        if hasHTTPFailed(row):
            msg = "Failure in getRunsInBlock for block %s. Error: %s %s" % (blockName,
                                                                            row.get('code'),
                                                                            row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        rows = aggRuns(rows)  # adjust to DBS Go server output
        # the aggregated output carries the run list in its first element
        runsByBlock[blockName] = rows[0]['run_num']
    return runsByBlock
def getWorkflow(requestName, reqMgrUrl):
    """Get list of workflow info from ReqMgr2 data-service for given request name.

    :param requestName: string with the workflow (request) name
    :param reqMgrUrl: string with the ReqMgr2 base URL
    :return: the 'result' list from the ReqMgr2 response (empty on no data)
    """
    url = '%s/data/request/%s' % (reqMgrUrl, requestName)
    mgr = RequestHandler()
    res = mgr.getdata(url, params={}, headers={'Accept': 'application/json'},
                      ckey=ckey(), cert=cert())
    return json.loads(res).get('result', [])
def getDetoxQuota(url):
    """Fetch the Detox quota page from `url` and return its content split
    into a list of lines.

    NOTE: the previous docstring was copy-pasted from getWorkflow and did
    not describe this function.

    :param url: string with the full Detox URL to fetch
    :return: list of text lines from the response body
    """
    headers = {}
    params = {}
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    res = res.split('\n')
    return res
def eventsLumisInfo(inputs, dbsUrl, validFileOnly=0, sumOverLumi=0):
    """Get information about events and lumis for given set of inputs: blocks or datasets.

    :param inputs: list of block or dataset names
    :param dbsUrl: string with the DBS URL
    :param validFileOnly: DBS flag to count valid files only
    :param sumOverLumi: DBS flag to sum over lumi sections
    :return: dict mapping each input name to its DBS file summary
    """
    eventsLumis = {}
    if not inputs:
        return eventsLumis
    # block names contain a '#'; dataset names do not
    what = 'block_name' if '#' in inputs[0] else 'dataset'
    urls = ['%s/filesummaries?validFileOnly=%s&sumOverLumi=%s&%s=%s'
            % (dbsUrl, validFileOnly, sumOverLumi, what, quote(i)) for i in inputs]
    for row in multi_getdata(urls, ckey(), cert()):
        # rename of the original shadowed 'data' local: the input name is
        # the URL-decoded value of the last URL parameter
        inputName = unquote(row['url'].split('=')[-1])
        if hasHTTPFailed(row):
            print("FAILURE: eventsLumisInfo for %s. Error: %s %s" % (inputName,
                                                                     row.get('code'),
                                                                     row.get('error')))
            continue
        # keep the last summary entry for this input, as the original did
        for item in json.loads(row['data']):
            eventsLumis[inputName] = item
    return eventsLumis
def getEventsLumis(dataset, dbsUrl, blocks=None, eventsLumis=None):
    """Helper function to return number of events/lumis for given dataset or blocks.

    :param dataset: dataset name
    :param dbsUrl: string with the DBS URL
    :param blocks: optional list of block names; when given, events/lumis
        are summed over the blocks instead of the whole dataset
    :param eventsLumis: optional cache dict (name -> DBS file summary),
        updated in place with any information fetched from DBS
    :return: tuple (number of events, number of lumis)
    """
    nevts = nlumis = 0
    if blocks:
        # FIX: the original raised TypeError ('in None') when blocks were
        # given without a cache dict; start from an empty cache instead
        if eventsLumis is None:
            eventsLumis = {}
        missingBlocks = [b for b in blocks if b not in eventsLumis]
        if missingBlocks:
            eLumis = eventsLumisInfo(missingBlocks, dbsUrl)
            eventsLumis.update(eLumis)
        for block in blocks:
            data = eventsLumis[block]
            nevts += data['num_event']
            nlumis += data['num_lumi']
        return nevts, nlumis
    if eventsLumis and dataset in eventsLumis:
        data = eventsLumis[dataset]
        return data['num_event'], data['num_lumi']
    # dataset not cached: fetch it from DBS, defaulting to zero counts
    eLumis = eventsLumisInfo([dataset], dbsUrl)
    data = eLumis.get(dataset, {'num_event': 0, 'num_lumi': 0})
    return data['num_event'], data['num_lumi']
def getComputingTime(workflow, eventsLumis=None, unit='h', dbsUrl=None, logger=None):
    """
    Estimate the computing time needed by a given workflow.

    :param workflow: dict with the workflow/request description
    :param eventsLumis: optional cache dict passed through to getEventsLumis
    :param unit: output unit: 'm', 'h' or 'd' (anything else yields seconds)
    :param dbsUrl: DBS server URL used to look up event counts
    :param logger: optional logger object
    :return: estimated computing time (events * TimePerEvent) in the requested unit
    """
    logger = getMSLogger(verbose=True, logger=logger)
    cput = None
    if 'InputDataset' in workflow:
        # processing request: event count comes from the input dataset,
        # possibly restricted to a block white-list
        dataset = workflow['InputDataset']
        if 'BlockWhitelist' in workflow and workflow['BlockWhitelist']:
            nevts, _ = getEventsLumis(dataset, dbsUrl, workflow['BlockWhitelist'], eventsLumis)
        else:
            nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
        tpe = workflow['TimePerEvent']
        cput = nevts * tpe
    elif 'Chain' in workflow['RequestType']:
        # Task/StepChain request: walk Task1/Step1, Task2/Step2, ... summing
        # the cost per task and carrying event counts between chained tasks
        base = workflow['RequestType'].replace('Chain', '')
        itask = 1
        cput = 0
        carryOn = {}
        while True:
            t = '%s%d' % (base, itask)
            itask += 1
            if t in workflow:
                task = workflow[t]
                if 'InputDataset' in task:
                    dataset = task['InputDataset']
                    if 'BlockWhitelist' in task and task['BlockWhitelist']:
                        nevts, _ = getEventsLumis(dataset, dbsUrl, task['BlockWhitelist'], eventsLumis)
                    else:
                        nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
                elif 'Input%s' % base in task:
                    # this task consumes the output of a previous task in the chain
                    nevts = carryOn[task['Input%s' % base]]
                elif 'RequestNumEvents' in task:
                    nevts = float(task['RequestNumEvents'])
                else:
                    logger.debug("this is not supported, making it zero cput")
                    nevts = 0
                tpe = task.get('TimePerEvent', 1)
                # remember this task's (filter-corrected) output events for
                # any downstream task referencing it by name
                carryOn[task['%sName' % base]] = nevts
                if 'FilterEfficiency' in task:
                    carryOn[task['%sName' % base]] *= task['FilterEfficiency']
                cput += tpe * nevts
            else:
                break
    else:
        # generation request: requested events; filter efficiency inflates
        # how many events must actually be produced
        nevts = float(workflow.get('RequestNumEvents', 0))
        feff = float(workflow.get('FilterEfficiency', 1))
        tpe = workflow.get('TimePerEvent', 1)
        cput = nevts / feff * tpe
    if cput is None:
        return 0
    # convert from seconds to the requested unit
    if unit == 'm':
        cput = cput / (60.)
    if unit == 'h':
        cput = cput / (60. * 60.)
    if unit == 'd':
        cput = cput / (60. * 60. * 24.)
    return cput
def sigmoid(x):
    """Logistic sigmoid: maps any real x into the open interval (0, 1)."""
    return 1. / (1. + math.exp(-x))
def getNCopies(cpuHours, minN=2, maxN=3, weight=50000, constant=100000):
    """
    Map a workflow's CPU-hour cost onto a number of copies between minN and
    maxN, using a sigmoid centered at `constant` with steepness `weight`.
    """
    floor = sigmoid(-constant / weight)
    scale = (maxN - minN) / (1 - floor)
    offset = (floor * maxN - minN) / (floor - 1)
    return int(offset + scale * sigmoid((cpuHours - constant) / weight))
def teraBytes(size):
    "Return size converted from bytes to TB (decimal Terabytes)"
    oneTB = 1000 ** 4
    return size / oneTB
def gigaBytes(size):
    "Return size converted from bytes to GB (decimal Gigabytes), rounded to 2 digits"
    oneGB = 1000 ** 3
    return round(size / oneGB, 2)
def elapsedTime(time0, msg='Elapsed time', ndigits=1):
    "Format a human readable message with the seconds elapsed since time0"
    delta = round(time.time() - time0, ndigits)
    return "%s: %s sec" % (msg, delta)
def getRequest(url, params):
    """
    Perform an HTTP GET against the given URL.
    NOTE: the 'verbose' key, if present, is removed from params.
    """
    verbose = params.pop('verbose', 0)
    headers = {'Accept': 'application/json'}
    mgr = RequestHandler()
    return mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(), verbose=verbose)
def postRequest(url, params):
    """
    Perform an HTTP POST against the given URL.
    NOTE: the 'verbose' key, if present, is removed from params.
    """
    verbose = params.pop('verbose', 0)
    headers = {'Accept': 'application/json'}
    mgr = RequestHandler()
    return mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(),
                       verb='POST', verbose=verbose)
def getIO(request, dbsUrl):
    """
    Collect input/output information for the given request.
    For chained requests (Task/Step chains) the information is aggregated
    over every numbered Task*/Step* dictionary found in the request.
    :return: tuple (lhe flag, primary set, parent set, secondary set)
    """
    lhe = False
    primary = set()
    parent = set()
    secondary = set()
    if 'Chain' not in request['RequestType']:
        return ioForTask(request, dbsUrl)
    base = request['RequestType'].replace('Chain', '')
    item = 1
    while '%s%d' % (base, item) in request:
        taskLhe, taskPrimary, taskParent, taskSecondary = \
            ioForTask(request['%s%d' % (base, item)], dbsUrl)
        lhe = lhe or taskLhe
        primary.update(taskPrimary)
        parent.update(taskParent)
        secondary.update(taskSecondary)
        item += 1
    return lhe, primary, parent, secondary
def ioForTask(request, dbsUrl):
    """
    Extract the LHE flag plus the primary, parent and secondary datasets
    from a single task/step (or plain request) dictionary.
    """
    lhe = False
    primary = set()
    parent = set()
    secondary = set()
    if 'InputDataset' in request:
        datasets = request['InputDataset']
        if not isinstance(datasets, list):
            datasets = [datasets]
        primary = {dset for dset in datasets if dset}
    if primary and request.get('IncludeParent'):
        parent = findParent(primary, dbsUrl)
    if 'MCPileup' in request:
        pileups = request['MCPileup']
        if not isinstance(pileups, list):
            pileups = [pileups]
        secondary = {dset for dset in pileups if dset}
    if request.get('LheInputFiles') in ['True', True]:
        lhe = True
    return lhe, primary, parent, secondary
def findParent(datasets, dbsUrl):
    """
    Helper function to find the parent dataset.
    It returns a dictionary key'ed by the child dataset
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    parentByDset = {}
    if not datasets:
        return parentByDset
    urls = ['%s/datasetparents?dataset=%s' % (dbsUrl, d) for d in datasets]
    logging.info("Executing %d requests against DBS 'datasetparents' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        dataset = row['url'].split('=')[-1]
        if hasHTTPFailed(row):
            # use the module logger (instead of a bare print) for consistency
            # with the rest of this module
            logging.error("Failure in findParent for dataset %s. Error: %s %s",
                          dataset, row.get('code'), row.get('error'))
            parentByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        try:
            for item in rows:
                parentByDset[item['this_dataset']] = item['parent_dataset']
        except Exception as exc:
            logging.error("Failure in findParent for dataset %s. Error: %s", dataset, str(exc))
            parentByDset[dataset] = None
    return parentByDset
from __future__ import print_function, division, absolute_import
from builtins import str
from future.utils import viewitems
from future import standard_library
standard_library.install_aliases()
import ast
import datetime
import json
import logging
import re
from urllib.parse import quote, unquote

from Utils.CertTools import cert, ckey
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
### Amount of days that we wait for stuck rules to be sorted
### After that, the rule is not considered and a new rule is created
STUCK_LIMIT = 7 # 7 days
def parseNewLineJson(stream):
    """
    Generator yielding one decoded JSON object per non-empty line of a
    newline-delimited JSON (NDJSON) string.
    """
    for record in stream.split("\n"):
        if not record:
            continue
        yield json.loads(record)
def stringDateToEpoch(strDate):
    """
    Given a date/time in the format of:
    'Thu, 29 Apr 2021 13:15:42 UTC'
    it returns an integer with the equivalent EPOCH time
    :param strDate: a string with the date and time (interpreted as UTC)
    :return: the equivalent EPOCH time (integer)
    """
    timestamp = datetime.datetime.strptime(strDate, "%a, %d %b %Y %H:%M:%S %Z")
    # interpret the parsed (naive) timestamp as UTC. The previous
    # strftime('%s') implementation was non-portable (not part of the C
    # standard) and applied the server's local timezone to a UTC timestamp
    return int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
def getRucioToken(rucioAuthUrl, rucioAcct):
    """
    Provided a Rucio account, fetch a token from the authentication server
    :param rucioAuthUrl: url to the rucio authentication server
    :param rucioAcct: rucio account to be used
    :return: tuple with the token string and its expiration time in EPOCH
    """
    url = '%s/auth/x509' % rucioAuthUrl
    logging.info("Requesting a token to Rucio for account: %s, against url: %s", rucioAcct, rucioAuthUrl)
    mgr = RequestHandler()
    res = mgr.getheader(url, params={}, headers={"X-Rucio-Account": rucioAcct},
                        ckey=ckey(), cert=cert())
    if res.getReason() != "OK":
        raise RuntimeError("Failed to acquire a Rucio token. Error: {}".format(res.getReason()))
    userToken = res.getHeaderKey('X-Rucio-Auth-Token')
    tokenExpiration = res.getHeaderKey('X-Rucio-Auth-Token-Expires')
    logging.info("Retrieved Rucio token valid until: %s", tokenExpiration)
    # convert the human readable expiration time to EPOCH time
    return userToken, stringDateToEpoch(tokenExpiration)
def renewRucioToken(rucioAuthUrl, userToken):
    """
    Provided a user Rucio token, check it's lifetime and extend it by another hour
    :param rucioAuthUrl: url to the rucio authentication server
    :param userToken: the user Rucio token to be renewed
    :return: the new token lifetime, as reported by the server
    """
    params = {}
    headers = {"X-Rucio-Auth-Token": userToken}
    url = '%s/auth/validate' % rucioAuthUrl
    logging.info("Renewing the Rucio token...")
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    try:
        # the server answers with a python-literal dict; parse it with
        # ast.literal_eval instead of eval() to avoid executing arbitrary
        # code from an (untrusted) HTTP response
        newExpiration = ast.literal_eval(res)['lifetime']
    except Exception as exc:
        raise RuntimeError("Failed to renew Rucio token. Response: {} Error: {}".format(res, str(exc)))
    return newExpiration
def getPileupContainerSizesRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of containers, find their total size in Rucio
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a flat dictionary of container and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE: Rucio version of getPileupDatasetSizes()
    """
    sizeByDset = {}
    if not containers:
        return sizeByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/{}?dynamic=anything'.format(rucioUrl, scope, cont) for cont in containers]
    logging.info("Executing %d requests against Rucio for the container size", len(urls))
    for row in multi_getdata(urls, ckey(), cert(), headers=headers):
        # recover the container name from the request URL
        container = row['url'].split('/dids/{}/'.format(scope))[1].replace("?dynamic=anything", "")
        if row['data'] is None:
            msg = "Failure in getPileupContainerSizesRucio for container {}. Response: {}".format(container, row)
            logging.error(msg)
            sizeByDset.setdefault(container, None)
            continue
        response = json.loads(row['data'])
        try:
            sizeByDset.setdefault(container, response['bytes'])
        except KeyError:
            msg = "getPileupContainerSizesRucio function did not return a valid response for container: %s. Error: %s"
            logging.error(msg, container, response)
            sizeByDset.setdefault(container, None)
    return sizeByDset
def listReplicationRules(containers, rucioAccount, grouping,
                         rucioUrl, rucioToken, scope="cms"):
    """
    List all the replication rules for the input filters provided.
    It builds a dictionary of container name and the locations where
    they have a rule locking data on, with some additional rule state
    logic in the code.
    :param containers: list of container names
    :param rucioAccount: string with the rucio account
    :param grouping: rule grouping string, only "A" or "D" are allowed
    :param rucioUrl: string with the Rucio url
    :param rucioToken: string with the Rucio token
    :param scope: string with the data scope
    :return: a flat dictionary key'ed by the container name, with a list of RSE
        expressions that still need to be resolved
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE-2: Available rule states can be found at:
        https://github.com/rucio/rucio/blob/16f39dffa1608caa0a1af8bbc0fcff2965dccc50/lib/rucio/db/sqla/constants.py#L180
    """
    locationByContainer = {}
    if not containers:
        return locationByContainer
    if grouping not in ["A", "D"]:
        raise RuntimeError("Replication rule grouping value provided ({}) is not allowed!".format(grouping))
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = []
    for cont in containers:
        # container names must be fully URL-quoted ('/' included, hence safe="")
        urls.append('{}/rules/?scope={}&account={}&grouping={}&name={}'.format(rucioUrl, scope, rucioAccount,
                                                                               grouping, quote(cont, safe="")))
    logging.info("Executing %d requests against Rucio to list replication rules", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the container name from the request URL
        container = unquote(row['url'].split("name=")[1])
        if "200 OK" not in row['headers']:
            msg = "Failure in listReplicationRules for container {}. Response: {}".format(container, row)
            logging.error(msg)
            locationByContainer.setdefault(container, None)
            continue
        try:
            locationByContainer.setdefault(container, [])
            for item in parseNewLineJson(row['data']):
                # transient states: rule not usable yet, a new rule may be created
                if item['state'] in ["U", "SUSPENDED", "R", "REPLICATING", "I", "INJECT"]:
                    msg = "Container %s has a rule ID %s in state %s. Will try to create a new rule."
                    logging.warning(msg, container, item['id'], item['state'])
                    continue
                elif item['state'] in ["S", "STUCK"]:
                    if item['error'] == 'NO_SOURCES:NO_SOURCES':
                        msg = "Container {} has a STUCK rule with NO_SOURCES.".format(container)
                        msg += " Data could be lost forever... Rule info is: {}".format(item)
                        logging.warning(msg)
                        continue
                    # then calculate for how long it's been stuck.
                    # NOTE: use a timezone-aware timestamp; the previous
                    # utcnow().strftime('%s') was non-portable and applied the
                    # local timezone to a UTC timestamp
                    utcTimeNow = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
                    if item['stuck_at']:
                        stuckAt = stringDateToEpoch(item['stuck_at'])
                    else:
                        # consider it to be stuck since its creation
                        stuckAt = stringDateToEpoch(item['created_at'])
                    daysStuck = (utcTimeNow - stuckAt) // (24 * 60 * 60)
                    if daysStuck > STUCK_LIMIT:
                        msg = "Container {} has a STUCK rule for {} days (limit set to: {}).".format(container,
                                                                                                     daysStuck,
                                                                                                     STUCK_LIMIT)
                        msg += " Not going to use it! Rule info: {}".format(item)
                        logging.warning(msg)
                        continue
                    else:
                        msg = "Container {} has a STUCK rule for only {} days.".format(container, daysStuck)
                        msg += " Considering it for the pileup location"
                        logging.info(msg)
                else:
                    logging.info("Container %s has rule ID %s in state %s, using it.",
                                 container, item['id'], item['state'])
                ### NOTE: this is not an RSE name, but an RSE expression that still needs to be resolved
                locationByContainer[container].append(item['rse_expression'])
        except Exception as exc:
            msg = "listReplicationRules function did not return a valid response for container: %s."
            msg += "Server responded with: %s\nError: %s"
            logging.exception(msg, container, str(exc), row['data'])
            locationByContainer.setdefault(container, None)
            continue
    return locationByContainer
def getPileupSubscriptionsRucio(datasets, rucioUrl, rucioToken, scope="cms"):
    """
    Provided a list of datasets, find dataset level subscriptions where it's
    as complete as `percent_min`.
    The final location of a dataset is the intersection of the locations of
    all of its blocks (only "AVAILABLE" replicas are considered).
    :param datasets: list of dataset names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary of datasets and a list of their location.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    # FIXME: we should definitely make a feature request to Rucio...
    # so much, just to get the final RSEs for a container!!!
    locationByDset = {}
    if not datasets:
        return locationByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # first, resolve the dataset into blocks
    blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope)
    urls = []
    for _dset, blocks in viewitems(blocksByDset):
        if blocks:
            for block in blocks:
                urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
    # this is going to be bloody expensive in terms of HTTP requests
    logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the block name from the request URL
        block = row['url'].split("/{}/".format(scope))[1]
        block = unquote(re.sub("/datasets$", "", block, 1))
        container = block.split("#")[0]
        locationByDset.setdefault(container, set())
        if row['data'] is None:
            msg = "Failure in getPileupSubscriptionsRucio container {} and block {}.".format(container, block)
            msg += " Response: {}".format(row)
            logging.error(msg)
            # mark the whole dataset as failed
            locationByDset[container] = None
            continue
        if locationByDset[container] is None:
            # then one of the block requests failed, skip the whole dataset
            continue
        thisBlockRSEs = set()
        for item in parseNewLineJson(row['data']):
            if item['state'] == "AVAILABLE":
                thisBlockRSEs.add(item["rse"])
        logging.info("Block: %s is available at: %s", block, thisBlockRSEs)
        # now we have the final block location
        if not locationByDset[container]:
            # then this is the first block of this dataset
            locationByDset[container] = thisBlockRSEs
        else:
            # otherwise, make an intersection of them
            locationByDset[container] = locationByDset[container] & thisBlockRSEs
    return locationByDset
def getBlocksAndSizeRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of containers, find all their correspondent blocks and their sizes.
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary in the form of:
      {"dataset": {"block": {"blockSize": 111, "locations": ["x", "y"]}}}
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE2: meant to return an output similar to Common.getBlockReplicasAndSize
    """
    contBlockSize = {}
    if not containers:
        return contBlockSize
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/dids/search?type=dataset&long=True&name={}'.format(rucioUrl, scope, quote(cont + "#*"))
            for cont in containers]
    logging.info("Executing %d requests against Rucio DIDs search API for containers", len(urls))
    for row in multi_getdata(urls, ckey(), cert(), headers=headers):
        # recover the container name from the 'name' query parameter
        container = unquote(row['url'].split("name=")[1]).replace("#*", "")
        contBlockSize.setdefault(container, {})
        if row['data'] in [None, ""]:
            msg = "Failure in getBlocksAndSizeRucio function for container {}. Response: {}".format(container, row)
            logging.error(msg)
            contBlockSize[container] = None
            continue
        for item in parseNewLineJson(row['data']):
            # NOTE: we do not care about primary block location in Rucio
            contBlockSize[container][item['name']] = {"blockSize": item['bytes'], "locations": []}
    return contBlockSize
### NOTE: likely not going to be used for a while
def getContainerBlocksRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Provided a list of containers, find all their blocks.
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary key'ed by the datasets with a list of blocks.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    blocksByDset = {}
    if not containers:
        return blocksByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/{}/dids'.format(rucioUrl, scope, cont) for cont in containers]
    logging.info("Executing %d requests against Rucio DIDs API for blocks in containers", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        container = row['url'].split("/{}/".format(scope))[1]
        container = re.sub("/dids$", "", container, 1)
        # always initialize the entry: the previous code only did so in the
        # empty-data branch, causing a KeyError on the first append below
        blocksByDset.setdefault(container, [])
        if not row['data']:
            logging.warning("Dataset: %s has no blocks in Rucio", container)
            # skip parsing: row['data'] can be None here, which would make
            # parseNewLineJson raise an AttributeError
            continue
        for item in parseNewLineJson(row['data']):
            blocksByDset[container].append(item["name"])
    return blocksByDset
### NOTE: likely not going to be used for a while
def getBlockReplicasAndSizeRucio(datasets, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of datasets, find all their blocks with replicas
    available.
    :param datasets: list of dataset names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    dsetBlockSize = {}
    if not datasets:
        return dsetBlockSize
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # first, figure out their block names
    blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope=scope)
    urls = []
    for _dset, blocks in viewitems(blocksByDset):
        for block in blocks:
            urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
    # next, query the replicas API for the block location
    # this is going to be bloody expensive in terms of HTTP requests
    logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # recover the block name from the request URL
        block = row['url'].split("/{}/".format(scope))[1]
        block = unquote(re.sub("/datasets$", "", block, 1))
        container = block.split("#")[0]
        dsetBlockSize.setdefault(container, dict())
        if row['data'] is None:
            msg = "Failure in getBlockReplicasAndSizeRucio for container {} and block {}.".format(container, block)
            msg += " Response: {}".format(row)
            logging.error(msg)
            # mark the whole dataset as failed
            dsetBlockSize[container] = None
            continue
        if dsetBlockSize[container] is None:
            # then one of the block requests failed, skip the whole dataset
            continue
        thisBlockRSEs = []
        blockBytes = 0
        for item in parseNewLineJson(row['data']):
            blockBytes = item['bytes']
            if item['state'] == "AVAILABLE":
                thisBlockRSEs.append(item["rse"])
        # now we have the final block location
        if not blockBytes and not thisBlockRSEs:
            logging.warning("Block: %s has no replicas and no size", block)
        else:
            dsetBlockSize[container][block] = {"locations": thisBlockRSEs, "blockSize": blockBytes}
    return dsetBlockSize
from __future__ import print_function, division
# system modules
import os
import re
# WMCore modules
from WMCore.REST.Server import RESTFrontPage
class FrontPage(RESTFrontPage):
    """MicroService front page.
    MicroService provides only one web page, the front page. The page just
    loads the javascript user interface, complete with CSS and all JS
    code embedded into it.
    The JavaScript code performs all the app functionality via the REST
    interface defined by the :class:`~.Data` class.
    """
    def __init__(self, app, config, mount):
        """
        :arg app: reference to the application object.
        :arg config: reference to the configuration.
        :arg str mount: URL mount point."""
        mainroot = 'microservice'  # entry point in access URL
        wpath = os.getenv('MS_STATIC_ROOT', '')
        # NOTE: removed a leftover debug print(wpath) here
        if not wpath:
            # derive the static content area from this module's install location
            content = os.path.abspath(__file__).rsplit('/', 5)[0]
            xlib = (__file__.find("/xlib/") >= 0 and "x") or ""
            wpath = "%s/%sdata/" % (content, xlib)
        if not wpath.endswith('/'):
            wpath += '/'
        print(self.__class__.__name__, "static content: %s" % wpath)
        # per-area regex whitelists restrict which static files may be served
        mdict = {"root": wpath, "rx": re.compile(r"^[a-z]+/[-a-z0-9]+\.(?:html)$")}
        tdict = {"root": wpath + "templates/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:html|tmpl)$")}
        jdict = {"root": wpath + "js/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:js)$")}
        cdict = {"root": wpath + "css/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\..*(?:css)$")}
        idict = {"root": wpath + "images/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:png|gif|jpg)$")}
        roots = {mainroot: mdict, "templates": tdict,
                 "js": jdict, "css": cdict, "images": idict}
        # location of frontpage in the root, e.g. microservice
        frontpage = "%s/templates/index.html" % mainroot
        RESTFrontPage.__init__(self, app, config, mount, frontpage, roots)
from __future__ import division, print_function
from builtins import object, str, bytes
from future.utils import viewitems
from copy import deepcopy
from Utils.IteratorTools import flattenList
class WfParser(object):
    """
    Workflow description parser class.
    """
    def __init__(self, docSchema):
        """
        The init method for the Workflow parser class.
        :param docSchema: Document template in the form of a list of tuples as follows:
                          [('KeyName', DefaultValue, type),
                           ('KeyName', DefaultValue, type),
                          ...]
                          To be used for identifying the fields to be searched for
                          in the workflow description
        """
        # accumulator keyed by schema key name; 'values' collects every
        # occurrence of the key found during the recursive traversal
        self.extDoc = {}
        for tup in docSchema:
            self.extDoc[tup[0]] = {'keyName': tup[0],
                                   'values': list(),
                                   'default': tup[1],
                                   'type': tup[2]}
    def __call__(self, wfDescr):
        """
        The Call method for the Workflow parser class.
        Traverses the workflow description, then collapses the accumulated
        values and returns the resulting extDoc dictionary.
        """
        self._paramFinder(wfDescr)
        self._wfParse()
        return self.extDoc
    def _paramFinder(self, wfObj):
        """
        Private method used to recursively traverse a workflow description
        and search for all the keyNames defined in the extDoc auxiliary data
        structure. If a 'keyName' happens to be present in several nested levels,
        or in several similar objects from the same level (like {'Task1': {},
        'Task2': {} ...), all the values found are accumulated in the respective
        (flat) list at extDoc[keyName]['values'], which is later to be converted
        to the originally expected type for the given field as described in the
        Document Template
        :param wfObj: Dictionary containing the workflow description
        """
        # recurse into every container first ...
        if isinstance(wfObj, (list, set, tuple)):
            for value in wfObj:
                self._paramFinder(value)
        if isinstance(wfObj, dict):
            for key, value in viewitems(wfObj):
                self._paramFinder(value)
            # ... then record any schema keys present at this dict level
            for key in self.extDoc:
                if key in wfObj:
                    self.extDoc[key]['values'].append(deepcopy(wfObj[key]))
    def _wfParse(self):
        """
        Workflow description parser. Given a document template representing all the
        keyNames to be searched and a workflow description to search in recursively,
        returns all the fields that it can find aggregated according to the rules bellow:
        * if the number of found key instances is 0 - sets the default value from
          the template.
        * if the number of found key instances is 1 - sets the so found value from the
          workflow description and converts it back to the form expected and described
          in the template (removes the outermost list used for value aggregation)
        * if the number of found key instances is > 1 - the values are aggregated
          according to the expected types and data structure defined in the
          template as follows:
          * bool: sets it to True if any of the values found was set to True
          * list: chains/flattens all the sub lists into a single list containing
                  all the values found
          * dict: aggregates/flattens all the key-value pairs from all the
                  dictionaries found into one big dictionary
                  WARNING: (if an inner keyName happens to be found in multiple
                            dictionaries from the aggregated list of dictionaries
                            it will be overwritten with the values from the last
                            one to be merged into the finally constructed dictionary)!
          * str:  will be accumulated in a list containing all the values found
                  WARNING: (will change the expected structure of the field from
                            a single string to a list of strings)!
        """
        # Convert back the so aggregated extDoc to the original structure:
        for keyName, data in viewitems(self.extDoc):
            if len(data['values']) == 0:
                # key never seen: fall back to the template default
                self.extDoc[keyName] = deepcopy(data['default'])
            elif len(data['values']) == 1:
                # single occurrence: unwrap the aggregation list
                self.extDoc[keyName] = deepcopy(data['values'][0])
            elif len(data['values']) > 1:
                # multiple occurrences: merge according to the declared type
                if data['type'] is bool:
                    self.extDoc[keyName] = any(data['values'])
                elif data['type'] is list:
                    self.extDoc[keyName] = list(set(flattenList(data['values'])))
                    # WARNING: If it happens this list to be constructed out of elements
                    #          which are instances of unhashable types (e.g. dict, list)
                    #          the set() call will produce an ERR, but this is unlikely
                    #          to happen, see [1] - All the fields we fetch from the
                    #          so nested structure of Task/Step Chain dictionary are
                    #          of hashable types.
                    #          [1] https://github.com/dmwm/WMCore/blob/ed40d33069bdddcd98ed5b8430d5ca6662e5941f/src/python/WMCore/WMSpec/StdSpecs/StdBase.py#L1189
                elif data['type'] is dict:
                    self.extDoc[keyName] = {}
                    for item in data['values']:
                        self.extDoc[keyName].update(item)
                elif (isinstance(data['type'], tuple) and (bytes in data['type'] or str in data['type'])) or \
                        (data['type'] is bytes or data['type'] is str):
                    # string-like: deduplicate; a single distinct value is unwrapped,
                    # multiple distinct values are kept as a list of strings
                    data['values'] = list(set(data['values']))
                    if len(data['values']) == 1:
                        self.extDoc[keyName] = deepcopy(data['values'][0])
                    else:
                        self.extDoc[keyName] = deepcopy(data['values'])
class MSRuleCleanerWflow(dict):
    """
    A minimal workflow and transfer information representation to serve the needs
    of the MSRuleCleaner Micro Service.
    """
    def __init__(self, wfDescr, **kwargs):
        """
        :param wfDescr: dict with the ReqMgr2 workflow description, searched
                        recursively for the keys declared in docSchema()
        :param kwargs: extra key/value pairs forwarded to the dict constructor
        """
        super(MSRuleCleanerWflow, self).__init__(**kwargs)
        # Search for all the keys we need from the ReqManager workflow description
        wfParser = WfParser(self.docSchema())
        myDoc = wfParser(wfDescr)
        # Convert some fields to lists explicitly:
        # NOTE: Those are fields defined as strings in the original workflow
        #       representation, but may turn into lists during the recursive
        #       search and we will use them as lists for the rest of the code.
        for key in ['ParentDataset']:
            if not isinstance(myDoc[key], list):
                if myDoc[key] is None:
                    myDoc[key] = []
                else:
                    myDoc[key] = [myDoc[key]]
        self.update(myDoc)
    def docSchema(self):
        """
        Return the data schema for the document.
        It's a tuple where:
        * 1st element: is the key name / attribute in the request
        * 2nd element: is the default value
        * 3rd element: is the expected data type
        Document format:
            {
            "RequestName": "ReqName",
            "RequestType": "Type",
            "SubRequestType": "Type",
            "RequestStatus": "Status",
            "OutputDatasets": [],
            'RulesToClean': {'plineMSTrCont': [],
                             'plineMSTrBlock': [],
                             'plineAgentCont': [],
                             'plineAgentBlock': []},
            'CleanupStatus': {'plineMSTrCont': False,
                              'plineMSTrBlock': False,
                              'plineAgentCont': False,
                              'plineAgentBlock': False},
            "TransferDone": False  # information - returned by the MSOutput REST call.
            "TransferTape": False  # information - fetched by Rucio about tape rules completion
            "TapeRulesStatus": [('36805b823062415c8ee60300b0e60378', 'OK', '/AToZHToLLTTbar_MA-1900_MH-1200_TuneCP5_13TeV-amcatnlo-pythia8/RunIISummer20UL16RECO-106X_mcRun2_asymptotic_v13-v2/AODSIM'),
                                ('5b75fb7503524449b0f304ea0e52f0de', 'STUCK', '/AToZHToLLTTbar_MA-1900_MH-1200_TuneCP5_13TeV-amcatnlo-pythia8/RunIISummer20UL16MiniAODv2-106X_mcRun2_asymptotic_v17-v2/MINIAODSIM')]
            'TargetStatus': 'normal-archived' || 'rejected-achived' || 'aborted-archived',
            'ParentageResolved': Bool,
            'PlineMarkers': None,
            'IsClean': False
            'IsLogDBClean': False,
            'IsArchivalDelayExpired': False,
            'ForceArchive': False,
            'RequestTransition': [],
            'IncludeParents': False
            'InputDataset': None,
            'ParentDataset': []
            }
        :return: a list of tuples
        """
        docTemplate = [
            ('RequestName', None, (bytes, str)),
            ('RequestType', None, (bytes, str)),
            ('SubRequestType', None, (bytes, str)),
            ('RequestStatus', None, (bytes, str)),
            ('OutputDatasets', [], list),
            ('RulesToClean', {}, dict),
            ('CleanupStatus', {}, dict),
            ('TransferDone', False, bool),
            ('TransferTape', False, bool),
            ('TapeRulesStatus', [], list),
            ('TargetStatus', None, (bytes, str)),
            ('ParentageResolved', True, bool),
            ('PlineMarkers', None, list),
            ('IsClean', False, bool),
            ('IsLogDBClean', False, bool),
            ('IsArchivalDelayExpired', False, bool),
            ('ForceArchive', False, bool),
            ('RequestTransition', [], list),
            ('IncludeParents', False, bool),
            ('InputDataset', None, (bytes, str)),
            ('ParentDataset', None, (bytes, str)),
            ('StatusAdvanceExpiredMsg', "", str)]
        # NOTE: ParentageResolved is set by default to True it will be False only if:
        #       - RequestType is StepChain
        #       - The parent workflow is still in a transient status
        #       this should be one of the flags to be used to estimate if
        #       the workflow is good for archival
        return docTemplate
from builtins import range
from WMCore.DataStructs.Run import Run
class Mask(dict):
    """
    _Mask_

    Dictionary-based run/lumi/event mask.

    Holds optional global boundaries (FirstEvent/LastEvent, FirstLumi/LastLumi,
    FirstRun/LastRun) plus an explicit per-run lumi map under 'runAndLumis':
    {run number: [[firstLumi, lastLumi], ...]}. An empty 'runAndLumis' map
    means "no restriction", i.e. every run/lumi passes the mask.
    """

    def __init__(self, **kwargs):
        dict.__init__(self, **kwargs)
        # NOTE(review): self.inclusive looks redundant with the
        # "inclusivemask" key below; both default to True and neither is
        # touched elsewhere in this class -- confirm before relying on it.
        self.inclusive = True
        self.setdefault("inclusivemask", True)
        self.setdefault("FirstEvent", None)
        self.setdefault("LastEvent", None)
        self.setdefault("FirstLumi", None)
        self.setdefault("LastLumi", None)
        self.setdefault("FirstRun", None)
        self.setdefault("LastRun", None)
        self.setdefault("runAndLumis", {})

    def setMaxAndSkipEvents(self, maxEvents, skipEvents):
        """
        _setMaxAndSkipEvents_

        Set FirstEvent & LastEvent fields as max & skip events.

        :param maxEvents: number of events to process; None leaves LastEvent unset
        :param skipEvents: number of events to skip, stored as FirstEvent
        """
        self['FirstEvent'] = skipEvents
        if maxEvents is not None:
            self['LastEvent'] = skipEvents + maxEvents
        return

    def setMaxAndSkipLumis(self, maxLumis, skipLumi):
        """
        _setMaxAndSkipLumis

        Set the maximum number of lumi sections and the starting point.
        """
        self['FirstLumi'] = skipLumi
        self['LastLumi'] = skipLumi + maxLumis
        return

    def setMaxAndSkipRuns(self, maxRuns, skipRun):
        """
        _setMaxAndSkipRuns

        Set the maximum number of runs and the starting point.
        """
        self['FirstRun'] = skipRun
        self['LastRun'] = skipRun + maxRuns
        return

    def getMaxEvents(self):
        """
        _getMaxEvents_

        Return the size of the event range, or None if either boundary is unset.

        NOTE(review): this returns LastEvent - FirstEvent + 1, while
        setMaxAndSkipEvents stores LastEvent = skip + max; the two
        conventions differ by one -- confirm which one callers expect.
        """
        if self['LastEvent'] is None or self['FirstEvent'] is None:
            return None
        return self['LastEvent'] - self['FirstEvent'] + 1

    def getMax(self, keyType=None):
        """
        _getMax_

        Generic version of getMaxEvents: keyType is e.g. 'Event', 'Lumi'
        or 'Run'. Returns Last<keyType> - First<keyType> + 1, or None when
        the keys are missing or unset.
        """
        if 'First%s' % (keyType) not in self:
            return None
        if self['First%s' % (keyType)] is None or self['Last%s' % (keyType)] is None:
            return None
        return self['Last%s' % (keyType)] - self['First%s' % (keyType)] + 1

    def addRun(self, run):
        """
        _addRun_

        Add a run object (WMCore.DataStructs.Run): its lumis are compressed
        into contiguous [first, last] ranges which are appended to
        'runAndLumis' via addRunAndLumis.

        :raises RuntimeError: if run is not a Run instance
        """
        if not isinstance(run, Run):
            msg = "addRun argument must be of type WMCore.DataStructs.Run"
            raise RuntimeError(msg)
        # NOTE(review): assumes run.lumis is non-empty; an empty lumi list
        # raises IndexError just below -- confirm callers guarantee that.
        run.lumis.sort()
        firstLumi = run.lumis[0]
        lastLumi = run.lumis[0]
        for lumi in run.lumis:
            if lumi <= lastLumi + 1:
                # still contiguous (or a duplicate): extend the open range
                lastLumi = lumi
            else:
                # gap found: close the current range and start a new one
                self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
                firstLumi = lumi
                lastLumi = lumi
        # close the final open range
        self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
        return

    def addRunWithLumiRanges(self, run, lumiList):
        """
        _addRunWithLumiRanges_

        Add to runAndLumis with call signature
        addRunWithLumiRanges(run=run, lumiList = [[start1,end1], [start2, end2], ...]

        NOTE: unlike addRunAndLumis this REPLACES any pre-existing ranges
        for the run instead of appending.
        """
        self['runAndLumis'][run] = lumiList
        return

    def addRunAndLumis(self, run, lumis=None):
        """
        _addRunAndLumis_

        Add runs and lumis directly

        TODO: The name of this function is a little misleading. If you pass a list of lumis
              it ignores the content of the list and adds a range based on the max/min in
              the list. Missing lumis in the list are ignored.

        NOTE: If the new run/lumi range overlaps with the pre-existing lumi ranges in the
              mask, no attempt is made to merge these together. This can result in a mask
              with duplicate lumis.
        """
        lumis = lumis or []
        if not isinstance(lumis, list):
            lumis = list(lumis)

        if run not in self['runAndLumis']:
            self['runAndLumis'][run] = []

        # only the min/max of the given lumis matter (see TODO above)
        self['runAndLumis'][run].append([min(lumis), max(lumis)])
        return

    def getRunAndLumis(self):
        """
        _getRunAndLumis_

        Return the {run: [[firstLumi, lastLumi], ...]} map of active runs/lumis.
        """
        return self['runAndLumis']

    def runLumiInMask(self, run, lumi):
        """
        _runLumiInMask_

        Return True if the given (run, lumi) pair is covered by the mask.
        An empty 'runAndLumis' map means no restriction, so everything passes.
        """
        if self['runAndLumis'] == {}:
            # Empty dictionary
            # ALWAYS TRUE
            return True

        if run not in self['runAndLumis']:
            return False

        for pair in self['runAndLumis'][run]:
            # Go through each max and min pair
            if pair[0] <= lumi and pair[1] >= lumi:
                # Then the lumi is bracketed
                return True

        return False

    def filterRunLumisByMask(self, runs):
        """
        _filterRunLumisByMask_

        Pass a Mask a list of run objects, get back a set of
        run objects that correspond to the actual mask allowed values.
        If the mask has no run/lumi restrictions, the input is returned as-is.
        """
        if self['runAndLumis'] == {}:
            # Empty dictionary
            # ALWAYS TRUE
            return runs

        # merge input runs that share the same run number
        runDict = {}
        for r in runs:
            if r.run in runDict:
                runDict[r.run].extendLumis(r.lumis)
            else:
                runDict[r.run] = r

        # only keep runs present both in the mask and in the input
        maskRuns = set(self["runAndLumis"].keys())
        passedRuns = set([r.run for r in runs])
        filteredRuns = maskRuns.intersection(passedRuns)

        newRuns = set()
        for runNumber in filteredRuns:
            # expand the mask's [first, last] pairs into an explicit lumi set
            maskLumis = set()
            for pair in self["runAndLumis"][runNumber]:
                if pair[0] == pair[1]:
                    maskLumis.add(pair[0])
                else:
                    maskLumis = maskLumis.union(list(range(pair[0], pair[1] + 1)))

            filteredLumis = set(runDict[runNumber].lumis).intersection(maskLumis)

            if len(filteredLumis) > 0:
                # rebuild a Run carrying (lumi, events) pairs for the survivors
                filteredLumiEvents = [(lumi, runDict[runNumber].getEventsByLumi(lumi)) for lumi in filteredLumis]
                newRuns.add(Run(runNumber, *filteredLumiEvents))
        return newRuns
from __future__ import absolute_import, division, print_function
from future.utils import listitems
import sys
import hashlib
import time
from functools import total_ordering
from Utils.Utilities import encodeUnicodeToBytes
from WMCore.DataStructs.WMObject import WMObject
@total_ordering
class WorkUnit(WMObject, dict):
    """
    _WorkUnit_

    Data object that contains details for a single work unit
    corresponding to tables workunit and frl_workunit_assoc
    """

    # fields copied verbatim into the JSON representation
    fieldsToCopy = ['taskid', 'retry_count', 'last_unit_count', 'last_submit_time', 'status', 'firstevent',
                    'lastevent', 'fileid']
    # fields returned by getInfo() (run_lumi is hashed/serialized specially)
    fieldsForInfo = fieldsToCopy + ['run_lumi']

    def __init__(self, taskID=None, retryCount=0, lastUnitCount=None, lastSubmitTime=None,
                 status=0, firstEvent=1, lastEvent=sys.maxsize, fileid=None, runLumi=None):
        super(WorkUnit, self).__init__(self)

        if lastSubmitTime is None:
            # Evaluate the timestamp at call time. The previous default,
            # int(time.time()) in the signature, was frozen at import time.
            lastSubmitTime = int(time.time())

        self.setdefault('taskid', taskID)
        self.setdefault('retry_count', retryCount)
        self.setdefault('last_unit_count', lastUnitCount)
        self.setdefault('last_submit_time', lastSubmitTime)
        self.setdefault('status', status)
        self.setdefault('firstevent', firstEvent)
        self.setdefault('lastevent', lastEvent)
        self.setdefault('fileid', fileid)
        self.setdefault('run_lumi', runLumi)

    def __lt__(self, rhs):
        """
        Compare work units on task id, run, lumi, first event, last event.
        """
        if self['taskid'] != rhs['taskid']:
            return self['taskid'] < rhs['taskid']
        if self['run_lumi'].run != rhs['run_lumi'].run:
            return self['run_lumi'].run < rhs['run_lumi'].run
        if self['run_lumi'].lumis != rhs['run_lumi'].lumis:
            return self['run_lumi'].lumis < rhs['run_lumi'].lumis
        # Fixed: these keys are stored as 'firstevent'/'lastevent' (see
        # __init__); the old 'first_event'/'last_event' lookups raised KeyError.
        if self['firstevent'] != rhs['firstevent']:
            return self['firstevent'] < rhs['firstevent']
        return self['lastevent'] < rhs['lastevent']

    def __eq__(self, rhs):
        """
        Work units are equal if they have the same task, run, lumis and event range.
        """
        # Fixed: run and lumis are now compared against rhs (previously the
        # object was compared against itself, which is always True).
        return (self['taskid'] == rhs['taskid'] and self['run_lumi'].run == rhs['run_lumi'].run and
                self['run_lumi'].lumis == rhs['run_lumi'].lumis and self['firstevent'] == rhs['firstevent'] and
                self['lastevent'] == rhs['lastevent'])

    def __hash__(self):
        """
        Hash function for this dict.
        """
        # Generate an immutable sorted string representing this object
        # NOTE: the run object needs to be hashed
        immutableSelf = []
        for keyName in sorted(self):
            if keyName == "run_lumi":
                immutableSelf.append((keyName, hash(self[keyName])))
            else:
                immutableSelf.append((keyName, self[keyName]))
        hashValue = hashlib.sha1(encodeUnicodeToBytes(str(immutableSelf)))
        # truncate to 15 hex digits so the result fits comfortably in an int column
        return int(hashValue.hexdigest()[:15], 16)

    def json(self, thunker=None):
        """
        _json_

        Serialize the object. Only copy select fields and construct one new field.
        """
        jsonDict = {k: self[k] for k in WorkUnit.fieldsToCopy}
        jsonDict["run_lumi"] = {"run_number": self['run_lumi'].run, "lumis": self['run_lumi'].lumis}
        return jsonDict

    def __to_json__(self, thunker=None):
        """
        __to_json__

        This is the standard way we jsonize other objects.
        Included here so we have a uniform method.
        """
        return self.json(thunker)

    def getInfo(self):
        """
        Returns: tuple of parameters for the work unit
        """
        return tuple(self[x] for x in WorkUnit.fieldsForInfo)
from builtins import str, bytes
__all__ = []
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WMObject import WMObject
class File(WMObject, dict):
    """
    _File_

    Data object that contains details for a single file

    TODO
    - use the decorator `from functools import total_ordering` after
      dropping support for python 2.6
    - then, drop __ne__, __le__, __gt__, __ge__
    """

    def __init__(self, lfn="", size=0, events=0, checksums=None,
                 parents=None, locations=None, merged=False):
        dict.__init__(self)
        checksums = checksums or {}
        self.setdefault("lfn", lfn)
        self.setdefault("size", size)
        self.setdefault("events", events)
        self.setdefault("checksums", checksums)
        self.setdefault('runs', set())
        self.setdefault('merged', merged)
        self.setdefault('last_event', 0)
        self.setdefault('first_event', 0)
        if locations is None:
            self.setdefault("locations", set())
        else:
            self.setdefault("locations", locations)
        if parents is None:
            self.setdefault("parents", set())
        else:
            self.setdefault("parents", parents)

    def addRun(self, run):
        """
        _addRun_

        run should be an instance of WMCore.DataStructs.Run

        Add a run container to this file, tweak the run and lumi
        keys to be max run and max lumi for backwards compat.

        :raises RuntimeError: if run is not a Run instance
        """
        if not isinstance(run, Run):
            msg = "addRun argument must be of type WMCore.DataStructs.Run"
            raise RuntimeError(msg)

        addFlag = False
        for runMember in self['runs']:
            if runMember.run == run.run:
                # this relies on the Run object overriding __add__ to update
                # runMember in place with run's lumis
                runMember + run
                addFlag = True

        if not addFlag:
            self['runs'].add(run)
        return

    def load(self):
        """
        A DataStructs file has nothing to load from, other implementations will
        over-ride this method.
        """
        # Fixed: use .get() since 'id' is never set by __init__ (json() below
        # already treats it as optional); self['id'] raised KeyError.
        if self.get('id'):
            self['lfn'] = '/store/testing/%s' % self['id']

    def save(self):
        """
        A DataStructs file has nothing to save to, other implementations will
        over-ride this method.
        """
        pass

    def setLocation(self, pnn):
        """
        Add one or more locations (PNNs) to this file.
        """
        # Make sure we don't add None, [], "" as file location
        if pnn:
            self['locations'] = self['locations'] | set(self.makelist(pnn))

    def __eq__(self, rhs):
        """
        File is equal if it has the same name. Accepts either another File
        or a plain lfn string for comparison.
        """
        eq = False
        if isinstance(rhs, type(self)):
            eq = self['lfn'] == rhs['lfn']
        elif isinstance(rhs, (str, bytes)):
            eq = self['lfn'] == rhs
        return eq

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def __hash__(self):
        # hash on the lfn only, consistent with __eq__
        thisHash = self['lfn'].__hash__()
        return thisHash

    def __lt__(self, rhs):
        """
        Sort files based on lexicographical ordering of the value connected
        to the 'lfn' key
        """
        eq = False
        if isinstance(rhs, type(self)):
            eq = self['lfn'] < rhs['lfn']
        elif isinstance(rhs, (str, bytes)):
            eq = self['lfn'] < rhs
        return eq

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        return not self.__le__(other)

    def __ge__(self, other):
        return not self.__lt__(other)

    def json(self, thunker=None):
        """
        _json_

        Serialize the file object. This will convert all Sets() to lists and
        weed out the internal data structures that don't need to be shared.
        """
        fileDict = {"last_event": self["last_event"],
                    "first_event": self["first_event"],
                    "lfn": self["lfn"],
                    "locations": list(self["locations"]),
                    "id": self.get("id", None),
                    "checksums": self["checksums"],
                    "events": self["events"],
                    "merged": self["merged"],
                    "size": self["size"],
                    "runs": [],
                    "parents": []}

        for parent in self["parents"]:
            if isinstance(parent, (str, bytes)):
                # Then for some reason, we're passing strings
                # Done specifically for ErrorHandler
                fileDict['parents'].append(parent)
            elif thunker is None:
                # without a thunker we cannot serialize parent File objects
                continue
            else:
                fileDict["parents"].append(thunker._thunk(parent))

        for run in self["runs"]:
            runDict = {"run_number": run.run,
                       "lumis": run.lumis}
            fileDict["runs"].append(runDict)

        return fileDict

    def __to_json__(self, thunker=None):
        """
        __to_json__

        This is the standard way we jsonize other objects.
        Included here so we have a uniform method.
        """
        return self.json(thunker)
from builtins import str
from WMCore.DataStructs.WMObject import WMObject
class SummaryHistogram(WMObject):
    """
    _SummaryHistogram_

    Abstract base histogram object. Provides the shared metadata handling
    and JSON serialization; the actual point accumulation and combination
    logic lives in ContinuousSummaryHistogram and DiscreteSummaryHistogram.
    Calling addPoint or __add__ on a bare instance raises NotImplementedError.
    """

    def __init__(self, title=None, xLabel=None):
        """
        __init__

        Set up the histogram metadata and empty data holders.
        """
        # Mutable meta-information, can be changed at any time
        self.title = title
        self.xLabel = xLabel

        # Internal state; only this class and its children should touch these
        self.continuous = None
        self.jsonInternal = None
        self.data = {}
        self.average = None
        self.stdDev = None

    def setTitle(self, newTitle):
        """
        _setTitle_

        Update the histogram title.
        """
        self.title = newTitle

    def setHorizontalLabel(self, xLabel):
        """
        _setHorizontalLabel_

        Update the label shown on the x axis.
        """
        self.xLabel = xLabel

    def addPoint(self, xValue, yLabel):
        """
        _addPoint_

        Abstract: subclasses implement the actual point accumulation.
        """
        raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")

    def toJSON(self):
        """
        _toJSON_

        Return a plain dictionary representation suitable for JSON encoding.
        Raises TypeError on a bare (non-subclassed) instance.
        """
        if self.continuous is None:
            raise TypeError("toJSON can't be called on a bare SummaryHistogram object")

        # subclass-specific payload first, then the common fields
        return {'internalData': self.jsonInternal or {},
                'title': self.title,
                'xLabel': self.xLabel,
                'continuous': self.continuous,
                'data': self.data,
                'stdDev': self.stdDev,
                'average': self.average}

    def __add__(self, other):
        """
        __add__

        Abstract: subclasses implement histogram combination.
        """
        raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")

    def __str__(self):
        """
        __str__

        String form of the JSON representation.
        """
        return str(self.toJSON())
from __future__ import division
import math
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import validateNumericInput
from WMCore.Algorithms.MathAlgos import calculateRunningAverageAndQValue, calculateStdDevFromQ
class ContinuousSummaryHistogram(SummaryHistogram):
    """
    _ContinuousSummaryHistogram_

    A histogram where there are continuous points
    with certain frequency, it follows
    that there is only one value in Y and
    that the average and standard deviation are
    not calculated on the frequency values but the X values.
    """

    def __init__(self, title, xLabel, yLabel = None,
                 roundingDecimals = 2, nBins = None,
                 dropOutliers = False, sigmaLimit = 5,
                 storeHistogram = True):
        """
        __init__

        Initialize a more complex histogram structure, containing different
        data to calculate online average and standard deviations. This data is also
        stored in the JSON to allow rebuilding and adding histograms.
        All histograms are binned when requested, the resolution can be specified
        through nBins, otherwise the value used is the one recommended in:
        Wand, M.P. (1997), "Data-Based Choice of Histogram Bin Width," The American Statistician, 51, 59-64.
        If specified, outliers farther than sigmaLimit standard deviations from the
        mean will not be included in the binned histogram.
        """
        # Initialize the parent object
        SummaryHistogram.__init__(self, title, xLabel)

        # Indicate this is a continuous histogram
        self.continuous = True

        # Add data only used in the continuous version
        self.yLabel = yLabel
        self.nPoints = 0
        self.QValue = None
        self.average = None

        # Configuration parameters for the continuous histograms
        self.roundingDecimals = roundingDecimals
        self.fixedNBins = nBins
        self.dropOutliers = dropOutliers
        self.sigmaLimit = sigmaLimit
        self.binned = False
        self.storeHistogram = storeHistogram

        # Override initialization of some attributes
        # NOTE(review): self.average is assigned None above and 0.0 here;
        # the earlier assignment is dead.
        self.average = 0.0
        self.stdDev = 0.0

        return

    def addPoint(self, xValue, yLabel = None):
        """
        _addPoint_

        Add a point from a continuous set (only-numbers allowed currently) to the histogram data,
        calculate the running average and standard deviation.
        If no y-label had been specified before, one must be supplied
        otherwise the given y-label must be either None or equal
        to the stored value.

        :raises Exception: if the histogram was already binned, if no y-label
                           is available, or on a conflicting y-label
        """
        if self.binned:
            # Points can't be added to binned histograms!
            raise Exception("Points can't be added to binned histograms")

        if self.yLabel is None and yLabel is None:
            raise Exception("Some y-label must be stored for the histogram")
        elif self.yLabel is None:
            self.yLabel = yLabel
        elif yLabel is not None and self.yLabel != yLabel:
            raise Exception("Only one y-label is allowed on continuous histograms")

        if not validateNumericInput(xValue):
            # Do nothing if it is not a number
            return

        # round to keep the frequency map small
        xValue = float(xValue)
        xValue = round(xValue, self.roundingDecimals)
        if self.storeHistogram:
            if xValue not in self.data:
                self.data[xValue] = 0
            self.data[xValue] += 1

        # online (Welford-style) running average and Q accumulator
        self.nPoints += 1
        (self.average, self.QValue) = calculateRunningAverageAndQValue(xValue, self.nPoints, self.average, self.QValue)

        return

    def __add__(self, other):
        #TODO: For HG1302, support multiple agents properly in the workload summary
        raise NotImplementedError

    def toJSON(self):
        """
        _toJSON_

        Bin the histogram if any, calculate the standard deviation. Store
        the internal data needed for reconstruction of the histogram
        from JSON and call superclass toJSON method.
        """
        if self.nPoints:
            self.stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
        if not self.binned and self.storeHistogram:
            self.binHistogram()
        # internal data needed to rebuild/combine the histogram later
        self.jsonInternal = {}
        self.jsonInternal['yLabel'] = self.yLabel
        self.jsonInternal['QValue'] = self.QValue
        self.jsonInternal['nPoints'] = self.nPoints

        return SummaryHistogram.toJSON(self)

    def binHistogram(self):
        """
        _binHistogram_

        Histograms of continuous data must be binned,
        this takes care of that using given or optimal parameters.
        Note that this modifies the data object,
        and points can't be added to the histogram after this.
        """
        if not self.nPoints:
            return

        self.binned = True

        # Number of bins can be specified or calculated based on number of points
        nBins = self.fixedNBins
        if nBins is None:
            # Wand (1997) rule of thumb: (5/3) * n^(1/3)
            nBins = int(math.floor((5.0 / 3.0) * math.pow(self.nPoints, 1.0 / 3.0)))

        # Define min and max
        if not self.dropOutliers:
            upperLimit = max(self.data.keys())
            lowerLimit = min(self.data.keys())
        else:
            # clip the range to +/- sigmaLimit standard deviations around the mean
            stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
            upperLimit = self.average + (stdDev * self.sigmaLimit)
            lowerLimit = self.average - (stdDev * self.sigmaLimit)

        # Incremental delta
        delta = abs(float(upperLimit - lowerLimit)) / nBins

        # Build the bins, it's a list of tuples for now
        bins = []
        a = lowerLimit
        b = lowerLimit + delta
        while len(bins) < nBins:
            bins.append((a, b))
            a += delta
            b += delta

        # Go through data and populate the binned histogram
        # (both the sorted points and the bins are ascending, so a single
        # forward sweep with two cursors is enough)
        binnedHisto = {}
        currentBin = 0
        currentPoint = 0
        sortedData = sorted(self.data.keys())
        while currentPoint < len(sortedData):
            point = sortedData[currentPoint]
            # dict keys must be strings for JSON, encode the bin edges as "lo,hi"
            encodedTuple = "%s,%s" % (bins[currentBin][0], bins[currentBin][1])
            if encodedTuple not in binnedHisto:
                binnedHisto[encodedTuple] = 0
            if point > upperLimit or point < lowerLimit:
                # outlier: skip the point
                currentPoint += 1
            elif currentBin == len(bins) - 1:
                # last bin is closed on both ends
                binnedHisto[encodedTuple] += self.data[point]
                currentPoint += 1
            elif point >= bins[currentBin][0] and point < bins[currentBin][1]:
                binnedHisto[encodedTuple] += self.data[point]
                currentPoint += 1
            else:
                # point belongs to a later bin: advance the bin cursor
                currentBin += 1

        self.data = binnedHisto

        return
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import getAverageStdDev
class DiscreteSummaryHistogram(SummaryHistogram):
    """
    _DiscreteSummaryHistogram_

    Histogram over a finite set of categories (the x values), where each
    category can carry counts for several y labels. Average and standard
    deviation per y label are computed lazily when the JSON is requested.
    """

    def __init__(self, title, xLabel):
        """
        __init__

        Set up a simple histogram that only accumulates counts; all
        statistics are derived at toJSON time.
        """
        SummaryHistogram.__init__(self, title, xLabel)

        # mark this as the discrete flavour
        self.continuous = False

        # every y label ever seen, so new categories can be zero-filled
        self.yLabels = set()

        # per-label statistics, filled in by toJSON
        self.average = {}
        self.stdDev = {}

    def addPoint(self, xValue, yLabel):
        """
        _addPoint_

        Increment the counter for (xValue, yLabel). Categories are used
        verbatim (no rounding); unseen categories and labels are zero-filled
        across the existing structure so every (category, label) pair exists.
        """
        if xValue not in self.data:
            # new category: start with a zero count for every known label
            self.data[xValue] = {label: 0 for label in self.yLabels}

        if yLabel not in self.yLabels:
            # new label: register it and back-fill zeros everywhere
            self.yLabels.add(yLabel)
            self.average[yLabel] = 0.0
            self.stdDev[yLabel] = 0.0
            for category in self.data:
                self.data[category][yLabel] = 0

        self.data[xValue][yLabel] += 1

    def __add__(self, other):
        #TODO: For HG1302, support multiple agents properly in the workload summary
        raise NotImplementedError

    def toJSON(self):
        """
        _toJSON_

        Compute per-label average and standard deviation over the category
        counts, then delegate serialization to the parent class.
        """
        for label in self.yLabels:
            counts = [self.data[category][label] for category in self.data]
            (self.average[label], self.stdDev[label]) = getAverageStdDev(counts)

        return SummaryHistogram.toJSON(self)
import logging
import sys
from collections import Counter
from WMCore.Services.DBS.DBS3Reader import DBS3Reader
from WMCore.Services.Rucio.Rucio import Rucio
RUCIO_ACCT = "wma_prod"
RUCIO_HOST = "http://cms-rucio.cern.ch"
RUCIO_AUTH = "https://cms-rucio-auth.cern.ch"
DBS_URL = "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
def loggerSetup(logLevel=logging.INFO):
    """
    Create and return a logger that writes every record to stdout.

    :param logLevel: logging threshold applied to both the logger and its handler
    :return: configured logging.Logger instance
    """
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler(stream=sys.stdout)
    fmt = logging.Formatter("%(asctime)s:%(levelname)s:%(module)s: %(message)s")
    handler.setFormatter(fmt)
    handler.setLevel(logLevel)
    logger.addHandler(handler)
    logger.setLevel(logLevel)
    return logger
def getFromRucio(dataset, logger):
    """
    Query Rucio for all the blocks of a given container and their file counts.

    :param dataset: container (dataset) name
    :param logger: logger object handed to the Rucio client
    :return: dict mapping block name to its number of files
    """
    client = Rucio(acct=RUCIO_ACCT,
                   hostUrl=RUCIO_HOST,
                   authUrl=RUCIO_AUTH,
                   configDict={'logger': logger})

    blockInfo = dict()
    for blockName in client.getBlocksInContainer(dataset):
        didInfo = client.getDID(blockName)
        blockInfo.setdefault(blockName, didInfo['length'])
    return blockInfo
def getFromDBS(dataset, logger):
    """
    Query DBS for all the blocks and files of a given dataset.

    :param dataset: dataset name
    :param logger: logger object handed to the DBS reader
    :return: tuple of (dict mapping block name to a Counter of valid/invalid
             file counts, Counter with the dataset-wide valid/invalid totals)
    """
    reader = DBS3Reader(DBS_URL, logger)

    perBlock = dict()
    totals = Counter({'valid': 0, 'invalid': 0})
    for blockName in reader.listFileBlocks(dataset):
        fileRecords = reader.dbs.listFileArray(block_name=blockName, validFileOnly=0, detail=True)
        perBlock.setdefault(blockName, Counter({'valid': 0, 'invalid': 0}))
        for rec in fileRecords:
            status = 'valid' if rec['is_file_valid'] == 1 else 'invalid'
            perBlock[blockName][status] += 1
            totals[status] += 1
    return perBlock, totals
def main():
    """
    Expects a dataset name as the only command line argument, then queries
    both Rucio and DBS and compares their block lists and file counts.
    """
    if len(sys.argv) != 2:
        print("A dataset name must be provided in the command line")
        sys.exit(1)
    dset = sys.argv[1]

    logger = loggerSetup(logging.INFO)
    rucioInfo = getFromRucio(dset, logger)
    dbsInfo, dbsCounters = getFromDBS(dset, logger)

    logger.info("*** Dataset: %s", dset)
    logger.info("Rucio file count : %s", sum(rucioInfo.values()))
    logger.info("DBS file count : %s", dbsCounters['valid'] + dbsCounters['invalid'])
    logger.info(" - valid files : %s", dbsCounters['valid'])
    logger.info(" - invalid files : %s", dbsCounters['invalid'])

    rucioBlocks = set(rucioInfo)
    dbsBlocks = set(dbsInfo)
    logger.info("Blocks in Rucio but not in DBS: %s", rucioBlocks - dbsBlocks)
    logger.info("Blocks in DBS but not in Rucio: %s", dbsBlocks - rucioBlocks)

    for blockName, rucioCount in rucioInfo.items():
        if blockName not in dbsInfo:
            logger.error("This block does not exist in DBS: %s", blockName)
            continue
        dbsCount = sum(dbsInfo[blockName].values())
        if rucioCount != dbsCount:
            logger.warning("Block with file mismatch: %s", blockName)
            logger.warning("\tRucio: %s\t\tDBS: %s", rucioCount, dbsCount)
# Script entry point; main() returns None, so the process exit code is 0.
if __name__ == "__main__":
    sys.exit(main())
from textwrap import TextWrapper
from collections import OrderedDict
def twClosure(replace_whitespace=False,
              break_long_words=False,
              maxWidth=120,
              maxLength=-1,
              maxDepth=-1,
              initial_indent=''):
    """
    Build a recursive pretty-printer for nested dicts/lists with very long
    key/value pairs.

    replace_whitespace: Replace each whitespace character with a single space.
    break_long_words:   If True words longer than width will be broken.
    maxWidth:           The maximum length of wrapped lines.
    maxLength:          Max number of entries shown per container (-1 = all);
                        extra entries are elided with "...".
    maxDepth:           Max dict nesting depth rendered (-1 = unlimited).
    initial_indent:     String prepended to the first wrapped line.

    Keys (and only keys) are wrapped to maxWidth characters; dict keys are
    emitted in sorted order; nested dicts and lists continue on the next
    line with 4 extra spaces of indentation.
    """
    twr = TextWrapper(replace_whitespace=replace_whitespace,
                      break_long_words=break_long_words,
                      width=maxWidth,
                      initial_indent=initial_indent)

    def twEnclosed(obj, ind='', depthReached=0, reCall=False):
        """
        Recursively render *obj* into a single indented string.

        ind:          current indentation prefix
        depthReached: number of dict levels entered so far
        reCall:       True on recursive invocations only (never set it
                      from outside)
        """
        out = ''
        if isinstance(obj, dict):
            ordered = OrderedDict(sorted(list(obj.items()),
                                         key=lambda t: t[0],
                                         reverse=False))
            if reCall:
                # nested dict: start on a fresh, deeper-indented line
                out += '\n'
                ind += '    '
            depthReached += 1
            shown = 0
            for key, value in list(ordered.items()):
                shown += 1
                if shown > maxLength and maxLength >= 0:
                    out += "%s...\n" % ind
                    break
                if depthReached <= maxDepth or maxDepth < 0:
                    out += "%s%s: %s" % (ind,
                                         ''.join(twr.wrap(key)),
                                         twEnclosed(value, ind, depthReached=depthReached, reCall=True))
        elif isinstance(obj, (list, set)):
            if reCall:
                # nested sequence: same fresh-line treatment as dicts,
                # but sequences do not count towards the depth limit
                out += '\n'
                ind += '    '
            shown = 0
            for value in obj:
                shown += 1
                if shown > maxLength and maxLength >= 0:
                    out += "%s...\n" % ind
                    break
                if depthReached <= maxDepth or maxDepth < 0:
                    out += "%s%s" % (ind, twEnclosed(value, ind, depthReached=depthReached, reCall=True))
        else:
            # scalar leaf: rendered as-is, one per line (not wrapped)
            out += "%s\n" % str(obj)
        return out
    return twEnclosed
def twPrint(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
    """
    Format *obj* via twClosure and print the result to stdout
    (see the twClosure docstring for the parameter meanings).
    """
    render = twClosure(maxWidth=maxWidth,
                       maxLength=maxLength,
                       maxDepth=maxDepth)(obj)
    print(render)
def twFormat(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
    """
    Format *obj* via twClosure and return the resulting string
    (see the twClosure docstring for the parameter meanings).
    """
    return twClosure(maxWidth=maxWidth,
                     maxLength=maxLength,
                     maxDepth=maxDepth)(obj)
import io
import os
import stat
import subprocess
import time
import zlib
from Utils.Utilities import decodeBytesToUnicode
def calculateChecksums(filename):
    """
    _calculateChecksums_

    Get the adler32 and crc32 checksums of a file. Return None on error

    Process line by line and adjust for known signed vs. unsigned issues
        http://docs.python.org/library/zlib.html

    The cksum UNIX command line tool implements a CRC32 checksum that is
    different than any of the python algorithms, therefore open cksum
    in a subprocess and feed it the same chunks of data that are used
    to calculate the adler32 checksum.
    """
    adler = 1  # adler32 of an empty string
    cksumProc = subprocess.Popen("cksum", stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    # stream the file in 4096 byte chunks, feeding each chunk to both
    # the adler32 accumulator and the cksum subprocess
    with open(filename, 'rb') as fileObj:
        while True:
            chunk = fileObj.read(4096)
            if not chunk:
                break
            adler = zlib.adler32(chunk, adler)
            cksumProc.stdin.write(chunk)

    cksumProc.stdin.close()
    cksumProc.wait()
    cksumOutput = cksumProc.stdout.read().split()
    cksumProc.stdout.close()

    # consistency check: cksum reports "<crc> <size>", the size must match the file
    expectedSize = os.stat(filename)[stat.ST_SIZE]
    if len(cksumOutput) != 2 or int(cksumOutput[1]) != expectedSize:
        raise RuntimeError("Something went wrong with the cksum calculation !")

    cksumOutput[0] = decodeBytesToUnicode(cksumOutput[0])
    # mask adler32 to an unsigned 32-bit value and format as 8 hex digits
    return (format(adler & 0xffffffff, '08x'), cksumOutput[0])
def tail(filename, nLines=20):
    """
    _tail_

    Return the last nLines lines of a file as a single string.

    Adapted from code on http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail

    :param filename: path of the file to read
    :param nLines: number of trailing lines to return (must be >= 0)
    :return: string with the last nLines lines ("" when nLines is 0)
    """
    assert nLines >= 0
    if nLines == 0:
        # Fixed: lines[-0:] slices the WHOLE list, so without this guard
        # nLines=0 returned the entire file instead of nothing.
        return ""
    pos, lines = nLines + 1, []
    # make sure only valid utf8 encoded chars will be passed along
    with io.open(filename, 'r', encoding='utf8', errors='ignore') as f:
        while len(lines) <= nLines:
            try:
                # NOTE: Python 3 forbids non-zero end-relative seeks on
                # text-mode files (io.UnsupportedOperation is an OSError
                # subclass and thus caught below), so this degrades to a
                # single read of the whole file from the start.
                f.seek(-pos, 2)
            except IOError:
                f.seek(0)
                break
            finally:
                lines = list(f)
            pos *= 2

    text = "".join(lines[-nLines:])
    return text
def getFileInfo(filename):
    """
    _getFileInfo_

    Return basic stat information for a file in a friendly dict:
    name, size in bytes, and human-readable modification/access timestamps.
    """
    statInfo = os.stat(filename)
    timeFmt = "%m/%d/%Y %I:%M:%S %p"
    return {'Name': filename,
            'Size': statInfo[stat.ST_SIZE],
            'LastModification': time.strftime(timeFmt, time.localtime(statInfo[stat.ST_MTIME])),
            'LastAccess': time.strftime(timeFmt, time.localtime(statInfo[stat.ST_ATIME]))}
def findMagicStr(filename, matchString):
    """
    _findMagicStr_

    Scan a log file and yield every line containing *matchString*.
    Invalid utf8 bytes are silently dropped while reading.
    """
    # TODO: can we avoid reading the whole file
    with io.open(filename, 'r', encoding='utf8', errors='ignore') as logfile:
        matching = (line for line in logfile if matchString in line)
        for line in matching:
            yield line
def getFullPath(name, envPath="PATH"):
    """
    Locate a file in the directories listed by an environment variable.

    :param name: file name
    :param envPath: any environment variable specified for path (PATH, PYTHONPATH, etc)
    :return: the first existing full path found, or None
    """
    searchDirs = os.getenv(envPath).split(os.path.pathsep)
    for directory in searchDirs:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return None
from builtins import str, bytes
def portForward(port):
    """
    Decorator wrapper function for port forwarding of the REST calls of any
    function to a given port.

    Constraints on the decorated function:
    1. It must be a bound (non-static) method, so its first positional
       argument is the class instance.
    2. The url must be its second positional argument, right after the
       instance.
    3. The url must follow the syntax specifications in RFC 1808:
       https://tools.ietf.org/html/rfc1808.html

    When those hold and the url targets a cmsweb host, the url is rewritten
    so the request goes to the given port instead.

    :param port: The port to which the REST call should be forwarded.
    """
    def portForwardDecorator(callFunc):
        """
        The actual decorator
        """
        def portMangle(callObj, url, *args, **kwargs):
            """
            Rewrite the url's port when it targets a cmsweb host; pass any
            other url (or non-string value) through untouched.

            :param callObj: the instance the decorated method is bound to
                            (forwarded verbatim, never inspected)
            :param url: the url to (possibly) forward, str or bytes
            """
            mangledUrl = None
            try:
                if isinstance(url, str) and url.startswith('https://cmsweb'):
                    mangledUrl = url.replace('.cern.ch/', '.cern.ch:%d/' % port, 1)
                elif isinstance(url, bytes) and url.startswith(b'https://cmsweb'):
                    mangledUrl = url.replace(b'.cern.ch/', b'.cern.ch:%d/' % port, 1)
            except Exception:
                # best effort: on any failure fall back to the original url
                mangledUrl = None

            if mangledUrl is not None:
                return callFunc(callObj, mangledUrl, *args, **kwargs)
            return callFunc(callObj, url, *args, **kwargs)
        return portMangle
    return portForwardDecorator
class PortForward():
    """
    Callable wrapper exposing the portForward decorator as a plain
    functional call:
    EXAMPLE:
        from Utils.PortForward import PortForward
        portForwarder = PortForward(8443)
        url = 'https://cmsweb-testbed.cern.ch/couchdb'
        url = portForwarder(url)
    """
    def __init__(self, port):
        """
        Remember the port used when mangling urls.
        """
        self.port = port
    def __call__(self, url):
        """
        Return url with the port injected when it targets a cmsweb host,
        otherwise return it unchanged.
        """
        def dummyCall(self, url):
            # identity function: the decorator does all the work
            return url
        decorated = portForward(self.port)(dummyCall)
        return decorated(self, url)
from builtins import object
from functools import reduce
class Functor(object):
    """
    Bind a function together with extra positional and keyword arguments so
    it can later be applied to a single object as:
        func(obj, *args, **kwargs)

    NOTE: all additional arguments must be supplied at construction time;
    run()/__call__() only accept the target object. The bound function must
    take at least one parameter, since the object is always passed first.

    :Example:
        >>> x = Functor(adder, 8, 'foo', bar=True)
        >>> x(2)   # calls adder(2, 8, 'foo', bar=True)
    """
    def __init__(self, func, *args, **kwargs):
        """
        Store the callable plus the arguments appended after the object.
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs
    def __call__(self, obj):
        """
        Calling the Functor is equivalent to invoking run().
        """
        return self.run(obj)
    def run(self, obj):
        """
        Apply the bound function to obj with the stored arguments.
        """
        return self.func(obj, *self.args, **self.kwargs)
class Pipeline(object):
    """
    A simple functional pipeline: applies a sequence of callables to an
    object, feeding the output of each step into the next one.
    """
    # NOTE: similar approaches are discussed in
    # https://softwarejourneyman.com/python-function-pipelines.html and
    # https://gitlab.com/mc706/functional-pipeline
    def __init__(self, funcLine=None, name=None):
        """
        :param funcLine: list of callables (plain functions or Functor
            instances) applied sequentially to the object passed to run().
            A Functor such as Functor(func, 'foo', bar=True) results in the
            call func(obj, 'foo', bar=True) during execution.
        :param name: optional human-readable pipeline name
        """
        self.funcLine = funcLine or []
        self.name = name
    def getPipelineName(self):
        """
        Return the pipeline name, falling back to a default placeholder.
        """
        return self.name if self.name else "Unnamed Pipeline"
    def run(self, obj):
        """
        Thread obj through every callable in funcLine and return the result.
        """
        result = obj
        for step in self.funcLine:
            result = step(result)
        return result
# system modules
import os
import ssl
import time
import logging
import traceback
# third part library
try:
import jwt
except ImportError:
traceback.print_exc()
jwt = None
from Utils.Utilities import encodeUnicodeToBytes
# prevent "SSL: CERTIFICATE_VERIFY_FAILED" error
# this will cause pylint warning W0212, therefore we ignore it above
ssl._create_default_https_context = ssl._create_unverified_context
def readToken(name=None):
    """
    Read an IAM token either from a file or from an environment variable.

    :param name: either a file name containing the token or the name of an
        environment variable holding the token value. When omitted, the
        IAM_TOKEN environment variable is used.
    :return: token string or None
    """
    if name:
        if os.path.exists(name):
            # name points to an existing file: read the token from it
            with open(name, 'r', encoding='utf-8') as istream:
                return istream.read()
        # otherwise treat name as an environment variable name
        return os.environ.get(name)
    return os.environ.get("IAM_TOKEN")
def tokenData(token, url="https://cms-auth.web.cern.ch/jwk", audUrl="https://wlcg.cern.ch/jwt/v1/any"):
    """
    inspect and extract token data
    :param token: token string
    :param url: IAM provider URL (JWK endpoint)
    :param audUrl: audience string
    :return: dict of decoded token claims; empty dict when no token is
        given or the PyJWT library could not be imported
    """
    # bail out early when there is nothing to decode or PyJWT is unavailable
    if not token or not jwt:
        return {}
    if isinstance(token, str):
        # the PyJWT APIs below operate on byte strings
        token = encodeUnicodeToBytes(token)
    # fetch the signing key matching the token's key id from the JWK endpoint
    jwksClient = jwt.PyJWKClient(url)
    signingKey = jwksClient.get_signing_key_from_jwt(token)
    key = signingKey.key
    # use the algorithm declared in the (unverified) token header; RS256 default
    headers = jwt.get_unverified_header(token)
    alg = headers.get('alg', 'RS256')
    # decode verifies signature, audience and expiration; raises on failure
    data = jwt.decode(
        token,
        key,
        algorithms=[alg],
        audience=audUrl,
        options={"verify_exp": True},
    )
    return data
def isValidToken(token):
    """
    Check whether the given token is still valid (not yet expired).

    :param token: token string
    :return: True when the token carries an expiration claim in the future,
        False otherwise
    """
    # removed a dead "tokenDict = {}" assignment that was immediately overwritten
    tokenDict = tokenData(token)
    # 'exp' is seconds since epoch; missing/zero means the token cannot be trusted
    exp = tokenDict.get('exp', 0)
    if not exp or exp < time.time():
        return False
    return True
class TokenManager():
    """
    TokenManager class handles IAM tokens: it reads a token from a file or
    an environment variable, caches it and lazily refreshes the cached copy
    once it expires.
    """
    def __init__(self,
                 name=None,
                 url="https://cms-auth.web.cern.ch/jwk",
                 audUrl="https://wlcg.cern.ch/jwt/v1/any",
                 logger=None):
        """
        Token manager reads IAM tokens either from file or env.
        It caches token along with expiration timestamp.
        By default the env variable to use is IAM_TOKEN.
        :param name: string representing either file or env where we should read token from
        :param url: IAM provider URL
        :param audUrl: audience string
        :param logger: logger object or none to use default one
        """
        self.name = name
        self.url = url
        self.audUrl = audUrl
        # seconds-since-epoch expiration of the cached token; 0 means unknown/expired
        self.expire = 0
        # cached token string; refreshed lazily by getToken()
        self.token = None
        self.logger = logger if logger else logging.getLogger()
        # eagerly fetch a token, but never fail construction when that is impossible
        try:
            self.token = self.getToken()
        except Exception as exc:
            self.logger.exception("Failed to get token. Details: %s", str(exc))
    def getToken(self):
        """
        Return valid token and sets its expire timestamp
        """
        # re-read the token only when there is none cached or it expired
        if not self.token or not isValidToken(self.token):
            self.token = readToken(self.name)
        tokenDict = {}
        try:
            tokenDict = tokenData(self.token, url=self.url, audUrl=self.audUrl)
            self.logger.debug(tokenDict)
        except Exception as exc:
            # log and re-raise: callers decide how to handle decode failures
            self.logger.exception(str(exc))
            raise
        self.expire = tokenDict.get('exp', 0)
        return self.token
    def getLifetime(self):
        """
        Return remaining lifetime of the existing token in seconds
        (may be negative when the token already expired)
        """
        return self.expire - int(time.time())
from copy import copy
from builtins import object
from time import time
class MemoryCacheException(Exception):
    """Exception raised for invalid operations on a MemoryCache instance."""
    def __init__(self, message):
        super().__init__(message)
class MemoryCache():
    """
    In-memory cache wrapping a single data object (dict, set, list or any
    other type) together with an expiration deadline.
    """
    __slots__ = ["lastUpdate", "expiration", "_cache"]

    def __init__(self, expiration, initialData=None):
        """
        Initializes cache object
        :param expiration: expiration time in seconds
        :param initialData: initial value for the cache
        """
        # lastUpdate: epoch seconds of the last (re)population of the cache
        self.lastUpdate = int(time())
        self.expiration = expiration
        self._cache = initialData

    def __contains__(self, item):
        """
        Check whether item is in the current cache
        :param item: a simple object (string, integer, etc)
        :return: True if the object can be found in the cache, False otherwise
        """
        return item in self._cache

    def __getitem__(self, keyName):
        """
        If the cache is a dictionary, return a copy of that item from the
        cache. Else, raise a MemoryCacheException.
        :param keyName: the key name from the dictionary
        """
        if isinstance(self._cache, dict):
            # shallow copy so callers cannot mutate the cached value in place
            return copy(self._cache.get(keyName))
        raise MemoryCacheException(
            "Cannot retrieve an item from a non-dict MemoryCache object: {}".format(self._cache))

    def reset(self):
        """
        Empty the cached data, keeping its current data type.
        :raises MemoryCacheException: for data types without a known clear operation
        """
        if isinstance(self._cache, (dict, set)):
            self._cache.clear()
        elif isinstance(self._cache, list):
            del self._cache[:]
        else:
            raise MemoryCacheException("The cache needs to be reset manually, data type unknown")

    def isCacheExpired(self):
        """
        Evaluate whether the cache has already expired, returning
        True if it did, otherwise it returns False
        """
        return self.lastUpdate + self.expiration < int(time())

    def getCache(self):
        """
        Raises a MemoryCacheException if the cache has expired, otherwise
        returns its data.
        """
        if self.isCacheExpired():
            expiredSince = int(time()) - (self.lastUpdate + self.expiration)
            raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
        return self._cache

    def setCache(self, inputData):
        """
        Refresh the cache with the content provided (refresh its expiration
        as well). The cache data type cannot be changed by this call.
        :param inputData: data to store in the cache
        :raises TypeError: when inputData's type differs from the cache type
        """
        if not isinstance(self._cache, type(inputData)):
            raise TypeError("Current cache data type: %s, while new value is: %s" %
                            (type(self._cache), type(inputData)))
        self.reset()
        self.lastUpdate = int(time())
        self._cache = inputData

    def addItemToCache(self, inputItem):
        """
        Adds new item(s) to the cache, without resetting its expiration.
        It only works for data caches of type: list, set or dict.
        :param inputItem: additional item to be added to the current cached data
        :raises TypeError: for unsupported cache/item type combinations
        """
        if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
            # extend another list or set into a set
            self._cache.update(inputItem)
        elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a set
            self._cache.add(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
            # extend another list or set into a list
            self._cache.extend(inputItem)
        elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
            # add a simple object (integer, string, etc) to a list
            self._cache.append(inputItem)
        elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
            self._cache.update(inputItem)
        else:
            # BUGFIX: the %-format arguments were swapped in the original,
            # reporting the cache type as the input item type and vice versa
            msg = "Input item type: %s cannot be added to a cache type: %s" % (type(inputItem), type(self._cache))
            raise TypeError("Cache and input item data type mismatch. %s" % msg)
from builtins import object
import logging
import time
import calendar
from datetime import tzinfo, timedelta
def gmtimeSeconds():
    """
    Return GMT time in seconds.
    NOTE: keeps the historical mktime(gmtime()) conversion for backward
    compatibility with timestamps already stored by callers.
    """
    gmtStruct = time.gmtime()
    return int(time.mktime(gmtStruct))
def encodeTimestamp(secs):
    """
    Encode seconds since epoch into a GMT timestamp string.
    :param secs: input timestamp value (int or float) in seconds since epoch
    :return: time string such as "1970-01-01T00:00:00Z"
    :raises Exception: when the input is not an int or float
    """
    if isinstance(secs, (int, float)):
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(int(secs)))
    raise Exception("Wrong input, should be seconds since epoch either int or float value")
def decodeTimestamp(timeString):
    """
    Decode a GMT timestamp string (see encodeTimestamp) back into seconds
    since epoch.
    :param timeString: timestamp string representation in GMT timezone
    :return: integer seconds since epoch (GMT)
    :raises Exception: when the input is not a string
    """
    if isinstance(timeString, str):
        return calendar.timegm(time.strptime(timeString, "%Y-%m-%dT%H:%M:%SZ"))
    raise Exception("Wrong input, should be time string in GMT timezone representation")
def timeFunction(func):
    """
    Decorator measuring how long a function/method takes to run.
    source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods

    The decorated call returns a tuple with:
    * wall clock time spent (rounded to 4 decimals)
    * returned result of the function
    * the function name
    """
    # local import keeps the file's top-level import block untouched
    from functools import wraps

    # IMPROVEMENT: functools.wraps preserves the wrapped function's
    # metadata (__name__, __doc__, ...) on the returned wrapper
    @wraps(func)
    def wrapper(*arg, **kw):
        startTime = time.time()
        res = func(*arg, **kw)
        endTime = time.time()
        return round((endTime - startTime), 4), res, func.__name__
    return wrapper
class CodeTimer(object):
    """
    A context manager for timing function calls.
    Adapted from https://www.blog.pythonlibrary.org/2016/05/24/python-101-an-intro-to-benchmarking-your-code/
    Use like
        with CodeTimer(label='Doing something'):
            do_something()
    """
    def __init__(self, label='The function', logger=None):
        # the clock starts at construction time, not at __enter__
        self.start = time.time()
        self.label = label
        self.logger = logger or logging.getLogger()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = round(time.time() - self.start, 3)
        self.logger.info(f"{self.label} took {elapsed} seconds to complete")
class LocalTimezone(tzinfo):
    """
    A tzinfo implementation describing the platform's local timezone,
    originally required on python 2 to format rfc3339 timestamps for
    sending alerts to the MONIT AlertManager.
    Details of the tzinfo interface can be found at:
    https://docs.python.org/2/library/datetime.html#tzinfo-objects
    """
    def __init__(self):
        super(LocalTimezone, self).__init__()
        self.ZERO = timedelta(0)
        # standard (non-DST) UTC offset taken from the C library globals
        self.STDOFFSET = timedelta(seconds=-time.timezone)
        if time.daylight:
            # daylight-saving offset, only defined when the zone uses DST
            self.DSTOFFSET = timedelta(seconds=-time.altzone)
        else:
            self.DSTOFFSET = self.STDOFFSET
        self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
    def utcoffset(self, dt):
        # the UTC offset depends on whether dt falls within DST
        if self._isdst(dt):
            return self.DSTOFFSET
        else:
            return self.STDOFFSET
    def dst(self, dt):
        # DST adjustment relative to standard time
        if self._isdst(dt):
            return self.DSTDIFF
        else:
            return self.ZERO
    def tzname(self, dt):
        # tzname tuple holds (standard name, DST name)
        return time.tzname[self._isdst(dt)]
    def _isdst(self, dt):
        # round-trip through mktime/localtime so the C library decides
        # whether the given wall-clock time is in daylight saving time
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        stamp = time.mktime(tt)
        tt = time.localtime(stamp)
        return tt.tm_isdst > 0
import copy
import unittest
class ExtendedUnitTestCase(unittest.TestCase):
    """
    unittest.TestCase extension providing an order-insensitive comparison
    of nested data structures.
    """
    def assertContentsEqual(self, expected_obj, actual_obj, msg=None):
        """
        A nested object comparison without regard for the ordering of contents. It asserts that
        expected_obj and actual_obj contain the same elements and that their sub-elements are the same.
        However, all sequences are allowed to contain the same elements, but in different orders.
        """
        def traverse_dict(dictionary):
            # recursively normalize nested containers inside the dict, in place
            for key, value in list(dictionary.items()):
                if isinstance(value, dict):
                    traverse_dict(value)
                elif isinstance(value, list):
                    traverse_list(value)
            return
        def get_dict_sortkey(x):
            # dicts are not orderable: sort them by their key list instead
            # NOTE(review): mixed-type lists (dicts next to scalars) would
            # still fail to sort on py3 -- assumed homogeneous lists
            if isinstance(x, dict):
                return list(x.keys())
            else:
                return x
        def traverse_list(theList):
            # normalize every nested container first, then sort this list
            for value in theList:
                if isinstance(value, dict):
                    traverse_dict(value)
                elif isinstance(value, list):
                    traverse_list(value)
            theList.sort(key=get_dict_sortkey)
            return
        if not isinstance(expected_obj, type(actual_obj)):
            self.fail(msg="The two objects are different type and cannot be compared: %s and %s" % (
                type(expected_obj), type(actual_obj)))
        # deep-copy so the in-place sorting does not mutate the caller's objects
        expected = copy.deepcopy(expected_obj)
        actual = copy.deepcopy(actual_obj)
        if isinstance(expected, dict):
            traverse_dict(expected)
            traverse_dict(actual)
        elif isinstance(expected, list):
            traverse_list(expected)
            traverse_list(actual)
        else:
            self.fail(msg="The two objects are different type (%s) and cannot be compared." % type(expected_obj))
        return self.assertEqual(expected, actual)
from builtins import str, bytes
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
    """
    Return a copy of the given headers dict where keys starting with
    'Cms-' or 'CMS-' are lower-cased. The WMCore authentication code only
    checks cms headers in lower case, e.g. cms-xxx-yyy.
    """
    loweredHeaders = {}
    for headerKey, headerVal in headers.items():
        if headerKey.startswith(('Cms-', 'CMS-')):
            loweredHeaders[headerKey.lower()] = headerVal
        else:
            loweredHeaders[headerKey] = headerVal
    return loweredHeaders
def makeList(stringList):
    """
    _makeList_
    Turn a comma separated string into a python list of strings; a list
    input is returned unchanged.
    :raises ValueError: when the input is neither a list nor a string
    """
    if isinstance(stringList, list):
        return stringList
    if not isinstance(stringList, str):
        raise ValueError("Can't convert to list %s" % stringList)
    # drop surrounding brackets/spaces before splitting on commas
    tokens = stringList.lstrip(' [').rstrip(' ]').split(',')
    if tokens == ['']:
        return []
    return [str(item.strip(' \'"')) for item in tokens]
def makeNonEmptyList(stringList):
    """
    _makeNonEmptyList_
    Same as makeList, but additionally raise ValueError when the resulting
    list is empty (or when the input is neither a string nor a list).
    """
    result = makeList(stringList)
    if result:
        return result
    raise ValueError("Input data cannot be an empty list %s" % stringList)
def strToBool(string):
    """
    Convert common textual/boolean representations of True or False into a
    real boolean value.
    In short:
    * True gets mapped from: True, "True", "true", "TRUE"
    * False gets mapped from: False, "False", "false", "FALSE"
    * anything else raises ValueError
    """
    # identity checks on purpose: 1/0 must NOT be treated as booleans
    if string is True or string is False:
        return string
    if string in ("True", "true", "TRUE"):
        return True
    if string in ("False", "false", "FALSE"):
        return False
    raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
    """
    _safeStr_
    Cast simple data (numbers, strings, None, booleans) to a string;
    refuse containers (tuple, list, set, dict) by raising ValueError.
    """
    if isinstance(string, (tuple, list, set, dict)):
        raise ValueError("We're not supposed to convert %s to string." % string)
    return str(string)
def diskUse():
    """
    Return the percentage use of each locally mounted disk partition.
    :return: list of dicts with 'mounted' (mount point) and 'percent'
        (usage string such as '85%') keys
    """
    diskPercent = []
    # -k: 1K blocks, -l: local filesystems only, -P: POSIX output (one line per fs)
    df = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
    output = df.communicate()[0]
    output = decodeBytesToUnicode(output).split("\n")
    for x in output:
        split = x.split()
        # skip empty lines and the header line starting with 'Filesystem'
        if split != [] and split[0] != 'Filesystem':
            # POSIX df columns: fs, blocks, used, available, capacity, mount point
            diskPercent.append({'mounted': split[5], 'percent': split[4]})
    return diskPercent
def numberCouchProcess():
    """
    Return the number of 'couchjs' processes currently running.
    :return: integer count of 'couchjs' occurrences in the `ps -ef` output
    """
    ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
    process = ps.communicate()[0]
    # count occurrences of the couchjs view-server binary in the process list
    process = decodeBytesToUnicode(process).count('couchjs')
    return process
def rootUrlJoin(base, extend):
    """
    Add a path element to the path portion of a ROOT url.
    :param base: url of the form root://host/path
    :param extend: path element to append
    :return: the extended url, or None when base is empty or not a ROOT url
    """
    if not base:
        return None
    match = re.match("^root://([^/]+)/(.+)", base)
    if not match:
        return None
    host, path = match.groups()
    return "root://%s/%s" % (host, os.path.join(path, extend))
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
    """
    _zipEncodeStr_
    Utility to zlib-compress and base64-encode a string.
    If the zipped/encoded form is longer than maxLen, the input message is
    truncated (with truncateIndicator appended) until the encoded version
    is within the allowed limit.
    :param message: input string (str or bytes)
    :param maxLen: maximum length of the encoded output; -1 disables the limit
    :param compressLevel: zlib compression level (0-9)
    :param steps: number of bytes chopped per truncation iteration
    :param truncateIndicator: marker appended to a truncated message
    :return: base64-encoded zlib-compressed bytes
    """
    message = encodeUnicodeToBytes(message)
    encodedStr = zlib.compress(message, compressLevel)
    encodedStr = base64.b64encode(encodedStr)
    if len(encodedStr) < maxLen or maxLen == -1:
        return encodedStr
    # estimate the compression ratio to guess how much of the message fits
    compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
    # Estimate new length for message zip/encoded version
    # to be less than maxLen.
    # Also, append truncate indicator to message.
    truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
    strLen = int((maxLen - len(truncateIndicator)) / compressRate)
    message = message[:strLen] + truncateIndicator
    # recurse with no limit to measure the new encoded length
    encodedStr = zipEncodeStr(message, maxLen=-1)
    # If new length is not short enough, truncate
    # recursively by steps
    while len(encodedStr) > maxLen:
        message = message[:-steps - len(truncateIndicator)] + truncateIndicator
        encodedStr = zipEncodeStr(message, maxLen=-1)
    return encodedStr
def getSize(obj):
    """
    _getSize_
    Traverse an object and compute its total size in bytes, following
    references while skipping types, modules and functions.
    :param obj: a python object
    :return: integer total size of the object
    :raises TypeError: when obj itself is a type, module or function
    Code adapted from Stack Overflow:
    https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
    """
    # custom objects know their class; functions know way too much,
    # including modules -- exclude all of those from the traversal
    blacklist = (type, ModuleType, FunctionType)
    if isinstance(obj, blacklist):
        raise TypeError('getSize() does not take argument of type: ' + str(type(obj)))
    seenIds = set()
    totalSize = 0
    pending = [obj]
    while pending:
        nextLevel = []
        for item in pending:
            if isinstance(item, blacklist) or id(item) in seenIds:
                continue
            seenIds.add(id(item))
            totalSize += sys.getsizeof(item)
            nextLevel.append(item)
        pending = get_referents(*nextLevel)
    return totalSize
def decodeBytesToUnicode(value, errors="strict"):
    """
    Decode *value* from utf-8 bytes into a unicode string; any non-bytes
    input is returned untouched.

    Useful for the "unicode sandwich" approach: convert bytes to unicode as
    soon as possible when receiving input data, and back to bytes as late
    as possible (the latter is NOT covered here, and often unnecessary).
    Reference: https://nedbatchelder.com/text/unipain.html

    :param value: any object; only bytes instances are decoded
    :param errors: decoding error policy -- "strict", "ignore", "replace"
        or "backslashreplace", see
        https://docs.python.org/3/howto/unicode.html#the-string-type
    """
    return value.decode("utf-8", errors) if isinstance(value, bytes) else value
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
    """
    Call decodeBytesToUnicode(value, errors) when *condition* is truthy,
    otherwise return value unchanged. This keeps call sites brief when the
    decoding must only happen in some situations (usually PY2/PY3 flags).

    Parameters
    ----------
    value : any
        passed to decodeBytesToUnicode
    errors : str
        passed to decodeBytesToUnicode
    condition : boolean or object with __bool__()
        when True, decodeBytesToUnicode is applied
    """
    return decodeBytesToUnicode(value, errors) if condition else value
def encodeUnicodeToBytes(value, errors="strict"):
    """
    Encode *value* from a unicode string into utf-8 bytes; any non-str
    input is returned untouched.

    Useful when passing a string to a third-party function that only
    accepts bytes (pycurl's curl.setopt is an example), following the
    "unicode sandwich" approach of converting back to bytes as late as
    possible.

    :param value: any object; only str instances are encoded
    :param errors: encoding error policy -- "strict", "ignore", "replace",
        "backslashreplace", "xmlcharrefreplace" or "namereplace", see
        https://docs.python.org/3/howto/unicode.html#the-string-type
    """
    return value.encode("utf-8", errors) if isinstance(value, str) else value
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
    """
    Call encodeUnicodeToBytes(value, errors) when *condition* is truthy,
    otherwise return value unchanged. This keeps call sites brief when the
    encoding must only happen in some situations (usually PY2/PY3 flags).

    Parameters
    ----------
    value : any
        passed to encodeUnicodeToBytes
    errors : str
        passed to encodeUnicodeToBytes
    condition : boolean or object with __bool__()
        when True, encodeUnicodeToBytes is applied
    """
    return encodeUnicodeToBytes(value, errors) if condition else value
import json
import urllib
from urllib.parse import urlparse, parse_qs, quote_plus
from collections import defaultdict
from Utils.CertTools import cert, ckey
from dbs.apis.dbsClient import aggFileLumis, aggFileParents
from WMCore.Services.pycurl_manager import getdata as multi_getdata
from Utils.PortForward import PortForward
def dbsListFileParents(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS listFileParents API.
    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict keyed by block name with aggregated file parentage info
    """
    urls = ['%s/fileparents?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, aggFileParents, 'block_name')
def dbsListFileLumis(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS listFileLumis API.
    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict keyed by block name with aggregated file lumi info
    """
    urls = ['%s/filelumis?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    return getUrls(urls, aggFileLumis, 'block_name')
def dbsBlockOrigin(dbsUrl, blocks):
    """
    Concurrent counterpart of the DBS blockorigin API.
    :param dbsUrl: DBS URL
    :param blocks: list of block names
    :return: dict keyed by block name with the raw block origin records
    """
    urls = ['%s/blockorigin?block_name=%s' % (dbsUrl, quote_plus(blk)) for blk in blocks]
    # no aggregation function: keep the decoded payload as-is
    return getUrls(urls, None, 'block_name')
def dbsParentFilesGivenParentDataset(dbsUrl, parentDataset, fInfo):
    """
    Obtain parent files for the given file info objects.
    :param dbsUrl: DBS URL
    :param parentDataset: parent dataset name
    :param fInfo: list of file info dicts carrying 'run_num',
        'lumi_section_num' and 'logical_file_name' keys
    :return: dict mapping each child lfn to a set of parent lfns
    """
    portForwarder = PortForward(8443)
    # IMPROVEMENT: the original built the exact same URL list twice (once to
    # query, once to look results up); build it once and reuse the pairing
    urlByFile = []
    for fileInfo in fInfo:
        run = fileInfo['run_num']
        lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
        url = f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}'
        urlByFile.append((fileInfo, portForwarder(url)))
    rdict = getUrls([url for _, url in urlByFile], None, None)
    parentFiles = defaultdict(set)
    for fileInfo, url in urlByFile:
        if url in rdict:
            pFiles = {row['logical_file_name'] for row in rdict[url]}
            parentFiles[fileInfo['logical_file_name']] = \
                parentFiles[fileInfo['logical_file_name']].union(pFiles)
    return parentFiles
def getUrls(urls, aggFunc, uKey=None):
    """
    Perform parallel DBS calls for the given set of urls and apply the given
    aggregation function to each result.
    :param urls: list of DBS urls to call
    :param aggFunc: aggregation function applied to each decoded payload,
        or None to keep the payload unchanged
    :param uKey: url query parameter whose value keys the result dict;
        when None the full url is used as the key
    :return: dictionary of results keyed per uKey value (or url)
    :raises RuntimeError: when any call returns a non-200 HTTP code
    """
    # IMPROVEMENT: renamed the response iterable so the loop no longer
    # shadows it with the per-row 'data' payload variable
    responses = multi_getdata(urls, ckey(), cert())
    rdict = {}
    for row in responses:
        url = row['url']
        code = int(row.get('code', 200))
        error = row.get('error')
        if code != 200:
            msg = f"Fail to query {url}. Error: {code} {error}"
            raise RuntimeError(msg)
        if uKey:
            key = urlParams(url).get(uKey)
        else:
            key = url
        payload = json.loads(row.get('data', []))
        rdict[key] = aggFunc(payload) if aggFunc else payload
    return rdict
def urlParams(url):
    """
    Return a dictionary of the URL's query parameters.
    Single-valued parameters are flattened to their value; repeated
    parameters keep their list of values.
    :param url: URL link
    :return: dictionary of URL parameters
    """
    query = urlparse(url).query
    parsed = parse_qs(query)
    return {key: vals[0] if len(vals) == 1 else vals for key, vals in parsed.items()}
from __future__ import (division, print_function)
from builtins import str, bytes
from Utils.Utilities import encodeUnicodeToBytes
from io import BytesIO
import re
import xml.etree.cElementTree as ET
# matches optionally negative integers, e.g. "5", "-12" (also a lone "-")
int_number_pattern = re.compile(r'(^[0-9-]$|^[0-9-][0-9]*$)')
# matches decimal numbers containing one dot, e.g. "1.5", "-3.", ".7"
float_number_pattern = re.compile(r'(^[-]?\d+\.\d*$|^\d*\.{1,1}\d+$)')
def adjust_value(value):
    """
    Normalize a string value parsed from XML: 'null' strings become None,
    numeric-looking strings are converted to float/int, anything else is
    returned unchanged (non-strings are passed through untouched).
    """
    if not isinstance(value, str):
        return value
    if value in ('null', '(null)'):
        return None
    # floats first: the int pattern would also match the integer part
    if float_number_pattern.match(value):
        return float(value)
    if int_number_pattern.match(value):
        return int(value)
    return value
def xml_parser(data, prim_key):
    """
    Generic XML parser: yields one dict per element whose tag equals
    prim_key, holding the element's attributes and (recursively) the data
    of its children.
    :param data: can be of type "file object", unicode string or bytes string
    :param prim_key: tag name of the elements to extract
    """
    if isinstance(data, (str, bytes)):
        # wrap in-memory strings into a byte stream so iterparse can read it
        stream = BytesIO()
        data = encodeUnicodeToBytes(data, "ignore")
        stream.write(data)
        stream.seek(0)
    else:
        stream = data
    # default iterparse events fire on element 'end'
    context = ET.iterparse(stream)
    for event, elem in context:
        row = {}
        key = elem.tag
        if key != prim_key:
            continue
        row[key] = elem.attrib
        # recursively fold the children data into row[key]
        get_children(elem, event, row, key)
        # free the processed element to keep memory flat on large documents
        elem.clear()
        yield row
def get_children(elem, event, row, key):
    """
    xml_parser helper function. Recursively collects information about the
    children of the given element into row[key].

    :param elem: ElementTree element whose children are inspected
    :param event: iterparse event name (kept for API compatibility)
    :param row: dict filled in place
    :param key: key of row under which children data is stored
    """
    # BUGFIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element (list(elem)) is the documented equivalent
    for child in list(elem):
        child_key = child.tag
        child_data = child.attrib
        if not child_data:
            # no XML attributes: fall back to the (normalized) text payload
            child_dict = adjust_value(child.text)
        else:
            child_dict = child_data
        if len(child):  # we got grand-children (len() replaces getchildren())
            if child_dict:
                row[key][child_key] = child_dict
            else:
                row[key][child_key] = {}
            if isinstance(child_dict, dict):
                newdict = {child_key: child_dict}
            else:
                newdict = {child_key: {}}
            get_children(child, event, newdict, child_key)
            row[key][child_key] = newdict[child_key]
        else:
            # leaf node: accumulate values in a list under the child's tag
            if not isinstance(row[key], dict):
                row[key] = {}
            row[key].setdefault(child_key, [])
            row[key][child_key].append(child_dict)
        child.clear()
from __future__ import division
from builtins import object
from datetime import timedelta, datetime
import socket
import json
import logging
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Timers import LocalTimezone
class AlertManagerAPI(object):
    """
    A class used to send alerts via the MONIT AlertManager API
    """

    def __init__(self, alertManagerUrl, logger=None):
        """
        :param alertManagerUrl: base URL of the AlertManager endpoint
        :param logger: logger object; the root logger is used if not provided
        """
        self.alertManagerUrl = alertManagerUrl
        # sender's hostname is added as an annotation
        self.hostname = socket.gethostname()
        self.mgr = RequestHandler()
        self.ltz = LocalTimezone()
        self.headers = {"Content-Type": "application/json"}
        self.validSeverity = ["high", "medium", "low"]
        self.logger = logger if logger else logging.getLogger()

    def sendAlert(self, alertName, severity, summary, description, service, tag="wmcore", endSecs=600, generatorURL=""):
        """
        :param alertName: a unique name for the alert
        :param severity: low, medium, high
        :param summary: a short description of the alert
        :param description: a longer informational message with details about the alert
        :param service: the name of the service firing an alert
        :param tag: a unique tag used to help route the alert
        :param endSecs: how many seconds until the alarm is silenced
        :param generatorURL: this URL will be sent to AlertManager and configured as a clickable "Source" link in the web interface
        :return: the AlertManager response, or False if the severity is invalid

        AlertManager JSON format reference: https://www.prometheus.io/docs/alerting/latest/clients/
        [
          {
            "labels": {
               "alertname": "<requiredAlertName>",
               "<labelname>": "<labelvalue>",
              ...
            },
            "annotations": {
               "<labelname>": "<labelvalue>",
              ...
            },
            "startsAt": "<rfc3339>", # optional, will be current time if not present
            "endsAt": "<rfc3339>",
            "generatorURL": "<generator_url>" # optional
          },
        ]
        """
        if not self._isValidSeverity(severity):
            return False
        alert = {"labels": {"alertname": alertName,
                            "severity": severity,
                            "tag": tag,
                            "service": service},
                 "annotations": {"hostname": self.hostname,
                                 "summary": summary,
                                 "description": description},
                 "generatorURL": generatorURL}
        # In python3 we won't need the LocalTimezone class
        # Will change to d = datetime.now().astimezone() + timedelta(seconds=endSecs)
        d = datetime.now(self.ltz) + timedelta(seconds=endSecs)
        alert["endsAt"] = d.isoformat("T")
        # AlertManager expects a JSON list of alert objects; pycurl_manager
        # only accepts dict and encoded string types, hence the explicit dump
        params = json.dumps([alert])
        res = self.mgr.getdata(self.alertManagerUrl, params=params, headers=self.headers, verb='POST')
        return res

    def _isValidSeverity(self, severity):
        """
        Used to check if the severity of the alert matches the valid levels: low, medium, high
        :param severity: severity of the alert
        :return: True or False
        """
        if severity not in self.validSeverity:
            # use the instance logger for consistency with the rest of the class
            self.logger.critical("Alert submitted to AlertManagerAPI with invalid severity: %s", severity)
            return False
        return True
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class DBCreator(DBFormatter):
    """
    _DBCreator_

    Generic class for creating database tables.
    """

    def __init__(self, logger, dbinterface):
        """
        _init_

        Call the constructor of the parent class and create empty dictionaries
        to hold table create statements, constraint statements and insert
        statements.
        """
        DBFormatter.__init__(self, logger, dbinterface)
        self.create = {}
        self.constraints = {}
        self.inserts = {}
        self.indexes = {}

    def _executeStatements(self, statements, keys, conn=None, transaction=False):
        """
        Execute the SQL statements stored under the given keys of the
        statements dictionary, wrapping any failure into a WMException
        that carries the offending SQL.
        """
        for key in keys:
            try:
                self.dbi.processData(statements[key],
                                     conn=conn,
                                     transaction=transaction)
            except Exception as e:
                msg = WMEXCEPTION['WMCORE-2'] + '\n\n' + \
                      str(statements[key]) + '\n\n' + str(e)
                self.logger.debug(msg)
                raise WMException(msg, 'WMCORE-2')

    def execute(self, conn=None, transaction=False):
        """
        _execute_

        Generic method to create tables and constraints by executing the
        sql statements in the create, indexes, constraints and inserts
        dictionaries. Before execution the keys assigned to the tables in
        the self.create dictionary are sorted, to offer the possibility of
        executing table creation in a certain order.
        """
        # create tables (sorted keys honour inter-table dependencies)
        self._executeStatements(self.create, sorted(self.create.keys()),
                                conn, transaction)
        # create indexes
        self._executeStatements(self.indexes, self.indexes, conn, transaction)
        # set constraints
        self._executeStatements(self.constraints, self.constraints, conn, transaction)
        # insert permanent data
        self._executeStatements(self.inserts, self.inserts, conn, transaction)
        return True

    def __str__(self):
        """
        _str_

        Return a well formatted text representation of the schema held in the
        self.create, self.constraints, self.inserts, self.indexes dictionaries.
        """
        parts = []
        for statements in (self.create, self.constraints, self.inserts, self.indexes):
            for key in statements:
                parts.append(statements[key].lstrip() + '\n')
        return ''.join(parts)
from __future__ import division, print_function
from builtins import str, object
try:
import mongomock
except ImportError:
# this library should only be required by unit tests
mongomock = None
from pymongo import MongoClient, errors, IndexModel
from pymongo.errors import ConnectionFailure
class MongoDB(object):
    """
    A simple wrapper class for creating a connection to a MongoDB instance
    """
    def __init__(self, database=None, server=None,
                 create=False, collections=None, testIndexes=False,
                 logger=None, mockMongoDB=False, **kwargs):
        """
        :database: A database Name to connect to
        :server: The server url or a list of (server:port) pairs (see https://docs.mongodb.com/manual/reference/connection-string/)
        :create: A flag to trigger a database creation (if missing) during
                 object construction, together with collections if present.
        :collections: A list of tuples describing collections with indexes -
                      the first element is considered the collection name, all
                      the rest elements are considered as indexes
        :testIndexes: A flag to trigger index test and eventually to create them
                      if missing (TODO)
        :mockMongoDB: A flag to trigger a database simulation instead of trying
                      to connect to a real database server.
        :logger: Logger

        Here follows a short list of useful optional parameters accepted by the
        MongoClient which may be passed as keyword arguments to the current module:
        :replicaSet: The name of the replica set to connect to. Defaults to None.
        :port: The port number on which to connect. It is overwritten by the ports
               defined in the Url string or from the tuples listed in the server list
        :connect: If True, immediately begin connecting to MongoDB in the background.
                  Otherwise connect on the first operation.
        :directConnection: If True, forces the client to connect directly to the
                           specified MongoDB host as a standalone.
        :username: A string
        :password: A string
            Although username and password must be percent-escaped in a MongoDB URI,
            they must not be percent-escaped when passed as parameters, e.g.:
            MongoClient(username="user name", password="pass/word")
        """
        self.server = server
        self.logger = logger
        self.mockMongoDB = mockMongoDB
        if mockMongoDB and mongomock is None:
            msg = "You are trying to mock MongoDB, but you do not have mongomock in the python path."
            self.logger.critical(msg)
            raise ImportError(msg)

        # NOTE: We need to explicitly check for server availability.
        #       Since pymongo 3.0 the MongoClient constructor no longer blocks
        #       while connecting and no longer raises ConnectionFailure if the
        #       server is unavailable; instead it returns immediately and
        #       connects on background threads. The explicit 'ping' below
        #       forces the connection attempt so errors surface here.
        try:
            if mockMongoDB:
                self.client = mongomock.MongoClient()
                self.logger.info("NOTICE: MongoDB is set to use mongomock, instead of real database.")
            else:
                self.client = MongoClient(host=self.server, **kwargs)
                self.client.server_info()
                self.client.admin.command('ping')
        except ConnectionFailure as ex:
            msg = "Could not connect to MongoDB server: %s. Server not available. \n"
            msg += "Giving up Now."
            self.logger.error(msg, self.server)
            raise ex from None
        except Exception as ex:
            msg = "Could not connect to MongoDB server: %s. Due to unknown reason: %s\n"
            msg += "Giving up Now."
            self.logger.error(msg, self.server, str(ex))
            raise ex from None

        self.create = create
        self.testIndexes = testIndexes
        self.dbName = database
        self.collections = collections or []

        self._dbConnect(database)

        if self.create and self.collections:
            for collection in self.collections:
                self._collCreate(collection, database)

        if self.testIndexes and self.collections:
            for collection in self.collections:
                self._indexTest(collection[0], collection[1])

    def _indexTest(self, collection, index):
        # TODO: test whether the expected indexes exist and create them if not
        pass

    def _collTest(self, coll, db):
        # TODO: test collection existence, e.g. via self[db].list_collection_names()
        pass

    def collCreate(self, coll):
        """
        A public method for _collCreate
        """
        # NOTE: fixed to use self.dbName; the previous self.database attribute
        #       was never set anywhere and raised AttributeError
        self._collCreate(coll, self.dbName)

    def _collCreate(self, coll, db):
        """
        A function used to explicitly create a collection with the relevant
        indexes - used to avoid the Lazy Creating from MongoDB and eventual issues
        in case we end up with no indexed collection, especially ones missing
        the (`unique` index parameter)
        :coll: A tuple describing one collection with indexes -
               The first element is considered to be the collection name, and all
               the rest of the elements are considered to be indexes.
               The indexes must be of type IndexModel. See pymongo documentation:
               https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.create_index
        :db: The database name for the collection
        """
        collName = coll[0]
        collIndexes = list(coll[1:])
        try:
            self.client[db].create_collection(collName)
        except errors.CollectionInvalid:
            # this error is thrown in case of an already existing collection
            msg = "Collection '{}' Already exists in database '{}'".format(collName, db)
            self.logger.warning(msg)

        if collIndexes:
            for index in collIndexes:
                if not isinstance(index, IndexModel):
                    msg = "ERR: Bad Index type for collection %s" % collName
                    self.logger.error(msg)
                    raise errors.InvalidName(msg)
            try:
                self.client[db][collName].create_indexes(collIndexes)
            except Exception as ex:
                msg = "Failed to create indexes on collection: %s\n%s" % (collName, str(ex))
                self.logger.error(msg)
                raise ex

    def _dbTest(self, db):
        """
        Tests database connection.
        """
        # Test connection (from mongoDB documentation):
        # https://api.mongodb.com/python/3.4.0/api/pymongo/mongo_client.html
        try:
            # The 'ismaster' command is cheap and does not require auth.
            self.client.admin.command('ismaster')
        except errors.ConnectionFailure as ex:
            msg = "Server not available: %s" % str(ex)
            self.logger.error(msg)
            raise ex

        # Test for database existence
        if db not in self.client.list_database_names():
            msg = "Missing MongoDB databases: %s" % db
            self.logger.error(msg)
            raise errors.InvalidName(msg)

    def _dbCreate(self, db):
        """
        Creates the database by inserting a single empty document; MongoDB
        creates databases lazily, only once they contain data.
        """
        _initColl = self.client[db].create_collection('_initCollection')
        _initColl.insert_one({})
        # NOTE: never delete the _initCollection if you want the database to persist

    def dbConnect(self):
        """
        A public method for _dbConnect
        """
        # NOTE: fixed to use self.dbName; the previous self.database attribute
        #       was never set anywhere and raised AttributeError
        self._dbConnect(self.dbName)

    def _dbConnect(self, db):
        """
        The function to be used for the initial database connection creation and testing
        """
        try:
            setattr(self, db, self.client[db])
            if not self.mockMongoDB:
                self._dbTest(db)
        except errors.ConnectionFailure as ex:
            msg = "Could not connect to MongoDB server for database: %s\n%s\n" % (db, str(ex))
            msg += "Giving up Now."
            self.logger.error(msg)
            raise ex
        except errors.InvalidName as ex:
            msg = "Could not connect to a missing MongoDB databases: %s\n%s" % (db, str(ex))
            self.logger.error(msg)
            if self.create:
                msg = "Trying to create: %s" % db
                self.logger.error(msg)
                try:
                    self._dbCreate(db)
                except Exception as exc:
                    msg = "Could not create MongoDB databases: %s\n%s\n" % (db, str(exc))
                    msg += "Giving up Now."
                    self.logger.error(msg)
                    raise exc
                try:
                    self._dbTest(db)
                except Exception as exc:
                    msg = "Second failure while testing %s\n%s\n" % (db, str(exc))
                    msg += "Giving up Now."
                    self.logger.error(msg)
                    raise exc
                msg = "Database %s successfully created" % db
                # success message: log at info level, not error
                self.logger.info(msg)
        except Exception as ex:
            msg = "General Exception while trying to connect to : %s\n%s" % (db, str(ex))
            self.logger.error(msg)
            raise ex
import logging
import time
from WMCore.DataStructs.WMObject import WMObject
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class Transaction(WMObject):
    """
    Wrapper around a database connection that manages an explicit
    transaction lifecycle (begin / commit / rollback).
    """
    dbi = None

    def __init__(self, dbinterface=None):
        """
        Get the connection from the DBInterface and open a new transaction on it
        """
        self.dbi = dbinterface
        self.conn = None
        self.transaction = None

    def begin(self):
        """
        Acquire a connection (or replace a closed one) and begin a
        transaction on it, unless one is already active.
        """
        if self.conn is None:
            self.conn = self.dbi.connection()
        if self.conn.closed:
            # the pooled connection may have been closed underneath us
            self.conn = self.dbi.connection()
        if self.transaction is None:
            self.transaction = self.conn.begin()
        return

    def processData(self, sql, binds=None):
        """
        Propagates the request to the proper dbcore backend,
        and performs checks for lost (or closed) connection.
        """
        if binds is None:
            # default handled here to avoid a shared mutable default argument
            binds = {}
        result = self.dbi.processData(sql, binds, conn=self.conn,
                                      transaction=True)
        return result

    def commit(self):
        """
        Commit the transaction and return the connection to the pool
        """
        if self.transaction is not None:
            self.transaction.commit()
        if self.conn is not None:
            self.conn.close()
        self.conn = None
        self.transaction = None

    def rollback(self):
        """
        To be called if there is an exception and you want to roll back the
        transaction and return the connection to the pool
        """
        if self.transaction:
            self.transaction.rollback()
        if self.conn:
            self.conn.close()
        self.conn = None
        self.transaction = None
        return

    def rollbackForError(self):
        """
        This is called when handling a major exception. This is because sometimes
        you can end up in a situation where the transaction appears open, but is not. In
        this case, calling a rollback on the transaction will cause an exception, which
        then destroys all logging and shutdown of the actual code.

        Use only in components.
        """
        try:
            self.rollback()
        except Exception:
            # deliberately best-effort: never mask the original error
            pass
        return
from copy import copy
from Utils.IteratorTools import grouper
import WMCore.WMLogging
from WMCore.DataStructs.WMObject import WMObject
from WMCore.Database.ResultSet import ResultSet
class DBInterface(WMObject):
    """
    Base class for doing SQL operations using a SQLAlchemy engine, or
    pre-exisitng connection.

    processData will take a (list of) sql statements and a (list of)
    bind variable dictionaries and run the statements on the DB. If
    necessary it will substitute binds into the sql (MySQL).

    TODO:
        Add in some suitable exceptions in one or two places
        Test the hell out of it
        Support executemany()
    """

    logger = None
    engine = None

    def __init__(self, logger, engine):
        self.logger = logger
        self.logger.info("Instantiating base WM DBInterface")
        self.engine = engine
        # upper limit of binds per executemany() call, to keep statements sane
        self.maxBindsPerQuery = 500

    def buildbinds(self, sequence, thename, therest=None):
        """
        Build a list of binds. Can be used recursively, e.g.:
        buildbinds(file, 'file', buildbinds(pnn, 'location'), {'lumi':123})

        TODO: replace with an appropriate map function
        """
        if therest is None:
            # default handled here to avoid a shared mutable default argument
            therest = [{}]
        binds = []
        for r in sequence:
            for i in self.makelist(therest):
                thebind = copy(i)
                thebind[thename] = r
                binds.append(thebind)
        return binds

    def executebinds(self, s=None, b=None, connection=None,
                     returnCursor=False):
        """
        _executebinds_

        Execute a single statement with an optional single set of binds.
        Returns either the raw cursor or a ResultSet wrapping it.
        """
        if b is None:
            resultProxy = connection.execute(s)
        else:
            resultProxy = connection.execute(s, b)
        if returnCursor:
            return resultProxy
        result = ResultSet()
        result.add(resultProxy)
        resultProxy.close()
        return result

    def executemanybinds(self, s=None, b=None, connection=None,
                         returnCursor=False):
        """
        _executemanybinds_

        b is a list of dictionaries for the binds, e.g.:
        b = [ {'bind1':'value1a', 'bind2': 'value2a'},
              {'bind1':'value1b', 'bind2': 'value2b'} ]

        see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/

        Can't executemany() selects - so do each combination of binds here instead.
        This will return a list of sqlalchemy.engine.base.ResultProxy object's
        one for each set of binds.

        returns a list of sqlalchemy.engine.base.ResultProxy objects
        """
        s = s.strip()
        if s.lower().startswith('select'):
            # selects cannot go through executemany(); run each set of
            # binds individually and collect the results
            if returnCursor:
                result = []
                for bind in b:
                    result.append(connection.execute(s, bind))
            else:
                result = ResultSet()
                for bind in b:
                    resultproxy = connection.execute(s, bind)
                    result.add(resultproxy)
                    resultproxy.close()
            return self.makelist(result)
        # inserting or updating many: let the DBAPI driver batch the binds
        result = connection.execute(s, b)
        return self.makelist(result)

    def connection(self):
        """
        Return a connection to the engine (from the connection pool)
        """
        return self.engine.connect()

    def processData(self, sqlstmt, binds=None, conn=None,
                    transaction=False, returnCursor=False):
        """
        set conn if you already have an active connection to reuse
        set transaction = True if you already have an active transaction
        """
        if binds is None:
            # default handled here to avoid a shared mutable default argument
            binds = {}
        connection = None
        try:
            if not conn:
                connection = self.connection()
            else:
                connection = conn

            result = []
            # Can take either a single statement or a list of statements and binds
            sqlstmt = self.makelist(sqlstmt)
            binds = self.makelist(binds)
            if len(sqlstmt) > 0 and (len(binds) == 0 or (binds[0] == {} or binds[0] is None)):
                # Should only be run by create statements
                if not transaction:
                    trans = connection.begin()
                for i in sqlstmt:
                    r = self.executebinds(i, connection=connection,
                                          returnCursor=returnCursor)
                    result.append(r)
                if not transaction:
                    trans.commit()
            elif len(binds) > len(sqlstmt) and len(sqlstmt) == 1:
                # Run single SQL statement for a list of binds - use execute_many()
                if not transaction:
                    trans = connection.begin()
                for subBinds in grouper(binds, self.maxBindsPerQuery):
                    result.extend(self.executemanybinds(sqlstmt[0], subBinds,
                                                        connection=connection, returnCursor=returnCursor))
                if not transaction:
                    trans.commit()
            elif len(binds) == len(sqlstmt):
                # Run a list of SQL for a list of binds
                if not transaction:
                    trans = connection.begin()
                for i, s in enumerate(sqlstmt):
                    b = binds[i]
                    r = self.executebinds(s, b, connection=connection,
                                          returnCursor=returnCursor)
                    result.append(r)
                if not transaction:
                    trans.commit()
            else:
                self.logger.exception(
                    "DBInterface.processData Nothing executed, problem with your arguments")
                self.logger.exception(
                    "DBInterface.processData SQL = %s" % sqlstmt)
                WMCore.WMLogging.sqldebug('DBInterface.processData sql is %s items long' % len(sqlstmt))
                WMCore.WMLogging.sqldebug('DBInterface.processData binds are %s items long' % len(binds))
                assert_value = False
                if len(binds) == len(sqlstmt):
                    assert_value = True
                WMCore.WMLogging.sqldebug('DBInterface.processData are binds and sql same length? : %s' % (assert_value))
                WMCore.WMLogging.sqldebug('sql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
                                          (sqlstmt, binds, connection, transaction))
                WMCore.WMLogging.sqldebug('type check:\nsql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
                                          (type(sqlstmt), type(binds), type(connection), type(transaction)))
                raise Exception("""DBInterface.processData Nothing executed, problem with your arguments
                Probably mismatched sizes for sql (%i) and binds (%i)""" % (len(sqlstmt), len(binds)))
        finally:
            if not conn and connection is not None:
                connection.close()  # Return connection to the pool
        return result
import copy
from WMCore.Database.DBCore import DBInterface
from WMCore.Database.ResultSet import ResultSet
def bindVarCompare(a):
    """
    _bindVarCompare_

    Sort key for bind-variable records of the form (name, position):
    orders them by the variable's position within the query.
    """
    _, position = a
    return position
def stringLengthCompare(a):
    """
    _stringLengthCompare_

    Sort key returning the length of a string; combined with
    reverse=True this orders strings from longest to shortest.
    """
    return len(a)
class MySQLInterface(DBInterface):
    """
    MySQL flavour of DBInterface: rewrites Oracle-style named bind
    variables (:name) into the %s placeholders plus positional bind
    tuples expected by the MySQL DBAPI driver.
    """

    def substitute(self, origSQL, origBindsList):
        """
        _substitute_

        Transform as set of bind variables from a list of dictionaries to a list
        of tuples:
          b = [ {'bind1':'value1a', 'bind2': 'value2a'},
                {'bind1':'value1b', 'bind2': 'value2b'} ]

        Will be transformed into:
          b = [ ('value1a', 'value2a'), ('value1b', 'value2b')]

        Don't need to substitute in the binds as executemany does that
        internally.  But the sql will also need to be reformatted, such that
        :bind_name becomes %s.

        See: http://www.devshed.com/c/a/Python/MySQL-Connectivity-With-Python/5/
        """
        if origBindsList == None:
            # nothing to substitute; pass the SQL through untouched
            return origSQL, None

        origBindsList = self.makelist(origBindsList)
        # all bind dicts are assumed to share the same keys; the first one
        # is used to discover the bind variable names
        origBind = origBindsList[0]
        bindVarPositionList = []
        updatedSQL = copy.copy(origSQL)

        # We process bind variables from longest to shortest to avoid a shorter
        # bind variable matching a longer one. For example if we have two bind
        # variables: RELEASE_VERSION and RELEASE_VERSION_ID the former will
        # match against the latter, causing problems. We'll sort the variable
        # names by length to guard against this.
        bindVarNames = list(origBind)
        bindVarNames.sort(key=stringLengthCompare, reverse=True)

        bindPositions = {}
        for bindName in bindVarNames:
            # first pass: record every position in the *original* SQL where
            # this bind variable occurs (positions of longer names are found
            # first thanks to the sort above)
            searchPosition = 0
            while True:
                bindPosition = origSQL.lower().find(":%s" % bindName.lower(),
                                                    searchPosition)
                if bindPosition == -1:
                    break
                # NOTE(review): bindPositions appears intended to stop a
                # shorter name from re-claiming a position already matched by
                # a longer one, but the append below runs unconditionally, so
                # the guard currently has no effect - confirm intended behavior
                if bindPosition not in bindPositions:
                    bindPositions[bindPosition] = 0
                bindVarPositionList.append((bindName, bindPosition))
                searchPosition = bindPosition + 1

            # second pass: replace each ":name" occurrence with "%s" in the
            # SQL that will actually be executed; the loop terminates because
            # every iteration removes one match from updatedSQL
            searchPosition = 0
            while True:
                bindPosition = updatedSQL.lower().find(":%s" % bindName.lower(),
                                                       searchPosition)
                if bindPosition == -1:
                    break

                left = updatedSQL[0:bindPosition]
                right = updatedSQL[bindPosition + len(bindName) + 1:]
                updatedSQL = left + "%s" + right

        # order the (name, position) records by their position in the query,
        # so the bind tuples line up with the %s placeholders
        bindVarPositionList.sort(key=bindVarCompare)

        mySQLBindVarsList = []
        for origBind in origBindsList:
            mySQLBindVars = []
            for bindVarPosition in bindVarPositionList:
                mySQLBindVars.append(origBind[bindVarPosition[0]])
            mySQLBindVarsList.append(tuple(mySQLBindVars))

        return (updatedSQL, mySQLBindVarsList)

    def executebinds(self, s = None, b = None, connection = None,
                     returnCursor = False):
        """
        _executebinds_

        Execute a SQL statement that has a single set of bind variables.
        Transform the bind variables into the format that MySQL expects.
        """
        s, b = self.substitute(s, b)
        return DBInterface.executebinds(self, s, b, connection, returnCursor)

    def executemanybinds(self, s = None, b = None, connection = None,
                         returnCursor = False):
        """
        _executemanybinds_

        Execute a SQL statement that has multiple sets of bind variables.
        Transform the bind variables into the format that MySQL expects.
        """
        newsql, binds = self.substitute(s, b)
        return DBInterface.executemanybinds(self, newsql, binds, connection,
                                            returnCursor)
from __future__ import print_function
from builtins import str, bytes, int
from future.utils import viewitems
from Utils.PythonVersion import PY2
import sys
import types
class _EmptyClass(object):
    """
    Placeholder used by JSONThunker to spawn bare instances whose
    __class__ is later re-pointed at the real thunked class.
    """
    pass
class JSONThunker(object):
"""
_JSONThunker_
Converts an arbitrary object to <-> from a jsonable object.
Will, for the most part "do the right thing" about various instance objects
by storing their class information along with their data in a dict. Handles
a recursion limit to prevent infinite recursion.
self.passThroughTypes - stores a list of types that should be passed
through unchanged to the JSON parser
self.blackListedModules - a list of modules that should not be stored in
the JSON.
"""
def __init__(self):
self.passThroughTypes = (type(None),
bool,
int,
float,
complex,
str,
bytes,
)
# objects that inherit from dict should be treated as a dict
# they don't store their data in __dict__. There was enough
# of those classes that it warrented making a special case
self.dictSortOfObjects = (('WMCore.Datastructs.Job', 'Job'),
('WMCore.WMBS.Job', 'Job'),
('WMCore.Database.CMSCouch', 'Document'))
# ditto above, but for lists
self.listSortOfObjects = (('WMCore.DataStructs.JobPackage', 'JobPackage'),
('WMCore.WMBS.JobPackage', 'JobPackage'),)
self.foundIDs = {}
# modules we don't want JSONed
self.blackListedModules = ('sqlalchemy.engine.threadlocal',
'WMCore.Database.DBCore',
'logging',
'WMCore.DAOFactory',
'WMCore.WMFactory',
'WMFactory',
'WMCore.Configuration',
'WMCore.Database.Transaction',
'threading',
'datetime')
def checkRecursion(self, data):
"""
handles checking for infinite recursion
"""
if id(data) in self.foundIDs:
if self.foundIDs[id(data)] > 5:
self.unrecurse(data)
return "**RECURSION**"
else:
self.foundIDs[id(data)] += 1
return data
else:
self.foundIDs[id(data)] = 1
return data
def unrecurse(self, data):
"""
backs off the recursion counter if we're returning from _thunk
"""
try:
self.foundIDs[id(data)] -= 1
except:
print("Could not find count for id %s of type %s data %s" % (id(data), type(data), data))
raise
def checkBlackListed(self, data):
"""
checks to see if a given object is from a blacklisted module
"""
try:
# special case
if data.__class__.__module__ == 'WMCore.Database.CMSCouch' and data.__class__.__name__ == 'Document':
data.__class__ = type({})
return data
if data.__class__.__module__ in self.blackListedModules:
return "Blacklisted JSON object: module %s, name %s, str() %s" % \
(data.__class__.__module__, data.__class__.__name__, str(data))
else:
return data
except Exception:
return data
def thunk(self, toThunk):
"""
Thunk - turns an arbitrary object into a JSONable object
"""
self.foundIDs = {}
data = self._thunk(toThunk)
return data
def unthunk(self, data):
"""
unthunk - turns a previously 'thunked' object back into a python object
"""
return self._unthunk(data)
def handleSetThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
tempDict = {'thunker_encoded_json': True, 'type': 'set'}
tempDict['set'] = self._thunk(list(toThunk))
self.unrecurse(toThunk)
return tempDict
def handleListThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
for k, v in enumerate(toThunk):
toThunk[k] = self._thunk(v)
self.unrecurse(toThunk)
return toThunk
def handleDictThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
special = False
tmpdict = {}
for k, v in viewitems(toThunk):
if type(k) == type(int):
special = True
tmpdict['_i:%s' % k] = self._thunk(v)
elif type(k) == type(float):
special = True
tmpdict['_f:%s' % k] = self._thunk(v)
else:
tmpdict[k] = self._thunk(v)
if special:
toThunk['thunker_encoded_json'] = self._thunk(True)
toThunk['type'] = self._thunk('dict')
toThunk['dict'] = tmpdict
else:
toThunk.update(tmpdict)
self.unrecurse(toThunk)
return toThunk
def handleObjectThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
toThunk = self.checkBlackListed(toThunk)
if isinstance(toThunk, (str, bytes)):
# things that got blacklisted
return toThunk
if hasattr(toThunk, '__to_json__'):
# Use classes own json thunker
toThunk2 = toThunk.__to_json__(self)
self.unrecurse(toThunk)
return toThunk2
elif isinstance(toThunk, dict):
toThunk2 = self.handleDictObjectThunk(toThunk)
self.unrecurse(toThunk)
return toThunk2
elif isinstance(toThunk, list):
# a mother thunking list
toThunk2 = self.handleListObjectThunk(toThunk)
self.unrecurse(toThunk)
return toThunk2
else:
try:
thunktype = '%s.%s' % (toThunk.__class__.__module__,
toThunk.__class__.__name__)
tempDict = {'thunker_encoded_json': True, 'type': thunktype}
tempDict[thunktype] = self._thunk(toThunk.__dict__)
self.unrecurse(toThunk)
return tempDict
except Exception as e:
tempDict = {'json_thunk_exception_': "%s" % e}
self.unrecurse(toThunk)
return tempDict
def handleDictObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_dict': True,
'type': thunktype,
thunktype: {}}
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
for k, v in viewitems(data):
tempDict[thunktype][k] = self._thunk(v)
return tempDict
def handleDictObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_dict', False)
thunktype = data.pop('type', False)
for k, v in viewitems(data):
if k == thunktype:
for k2, v2 in viewitems(data[thunktype]):
value[k2] = self._unthunk(v2)
else:
value.__dict__[k] = self._unthunk(v)
return value
def handleListObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_list': True,
'type': thunktype,
thunktype: []}
for k, v in enumerate(data):
tempDict['thunktype'].append(self._thunk(v))
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
return tempDict
def handleListObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_list', False)
thunktype = data.pop('type')
for k, v in viewitems(data[thunktype]):
setattr(value, k, self._unthunk(v))
for k, v in viewitems(data):
if k == thunktype:
continue
value.__dict__ = self._unthunk(v)
return value
def _thunk(self, toThunk):
"""
helper function for thunk, does the actual work
"""
if isinstance(toThunk, self.passThroughTypes):
return toThunk
elif type(toThunk) is list:
return self.handleListThunk(toThunk)
elif type(toThunk) is dict:
return self.handleDictThunk(toThunk)
elif type(toThunk) is set:
return self.handleSetThunk(toThunk)
elif type(toThunk) is types.FunctionType:
self.unrecurse(toThunk)
return "function reference"
elif isinstance(toThunk, object):
return self.handleObjectThunk(toThunk)
else:
self.unrecurse(toThunk)
raise RuntimeError(type(toThunk))
def _unthunk(self, jsondata):
    """
    _unthunk - does the actual work for unthunk

    Recursively converts thunked JSON data back into python objects.
    Plain (non-thunked) dicts are unthunked value by value; dicts
    carrying the 'thunker_encoded_json' marker are rebuilt into sets,
    dicts with non-string keys, or instances of the original class
    (resolved via getThunkedClass). Anything else is returned as-is.
    """
    # On python2 force unicode strings back to utf-8 byte strings.
    if PY2 and type(jsondata) is str:
        return jsondata.encode("utf-8")
    if type(jsondata) is dict:
        if 'thunker_encoded_json' in jsondata:
            # we've got a live one...
            if jsondata['type'] == 'set':
                newSet = set()
                # NOTE(review): the payload list is unthunked as a whole
                # and then each element is unthunked again - verify the
                # double _unthunk is intentional.
                for i in self._unthunk(jsondata['set']):
                    newSet.add(self._unthunk(i))
                return newSet
            if jsondata['type'] == 'dict':
                # We have a "special" dict
                # keys were string-encoded with '_i:'/'_f:' prefixes to
                # survive JSON; convert them back to int/float here.
                data = {}
                for k, v in viewitems(jsondata['dict']):
                    tmp = self._unthunk(v)
                    if k.startswith('_i:'):
                        data[int(k.lstrip('_i:'))] = tmp
                    elif k.startswith('_f:'):
                        data[float(k.lstrip('_f:'))] = tmp
                    else:
                        data[k] = tmp
                return data
            else:
                # spawn up an instance.. good luck
                # here be monsters
                # inspired from python's pickle code
                ourClass = self.getThunkedClass(jsondata)
                # _EmptyClass is a bare shell; assigning __class__ avoids
                # running the real __init__ (which may need arguments).
                value = _EmptyClass()
                if hasattr(ourClass, '__from_json__'):
                    # Use classes own json loader
                    try:
                        value.__class__ = ourClass
                    except Exception:
                        # __class__ assignment can fail (e.g. slots);
                        # fall back to the default constructor.
                        value = ourClass()
                    value = ourClass.__from_json__(value, jsondata, self)
                elif 'thunker_encoded_json' in jsondata and 'is_dict' in jsondata:
                    try:
                        value.__class__ = ourClass
                    except Exception:
                        value = ourClass()
                    value = self.handleDictObjectUnThunk(value, jsondata)
                elif 'thunker_encoded_json' in jsondata:
                    # not a dict subclass: assume list subclass encoding
                    try:
                        value.__class__ = ourClass
                    except Exception:
                        value = ourClass()
                    value = self.handleListObjectUnThunk(value, jsondata)
                else:
                    raise RuntimeError('Could not unthunk a class. Code to try was removed because it had errors.')
                return value
        else:
            # ordinary dict: unthunk each value, keep the keys
            data = {}
            for k, v in viewitems(jsondata):
                data[k] = self._unthunk(v)
            return data
    else:
        # scalars, lists, etc. pass through unchanged
        return jsondata
@staticmethod
def getThunkedClass(jsondata):
"""
Work out the class from it's thunked json representation
"""
module = jsondata['type'].rsplit('.', 1)[0]
name = jsondata['type'].rsplit('.', 1)[1]
if (module == 'WMCore.Services.Requests') and (name == JSONThunker):
raise RuntimeError("Attempted to unthunk a JSONThunker..")
__import__(module)
mod = sys.modules[module]
ourClass = getattr(mod, name)
return ourClass | /reqmgr2ms-transferor-2.2.4rc2.tar.gz/reqmgr2ms-transferor-2.2.4rc2/src/python/WMCore/Wrappers/JsonWrapper/JSONThunker.py | 0.443118 | 0.360208 | JSONThunker.py | pypi |
from builtins import next, str, object
from future.utils import viewitems
import xml.parsers.expat
class Node(object):
    """
    _Node_
    Really simple DOM like container to simplify parsing the XML file
    and formatting the character data without all the whitespace guff

    Attributes:
      name     -- element tag name (stringified)
      attrs    -- dict of stringified attribute name/value pairs
      text     -- character data, filled in when the element closes
      children -- child Node instances in document order
    """
    def __init__(self, name, attrs):
        self.name = str(name)
        self.attrs = {}
        self.text = None
        for k, v in viewitems(attrs):
            # idiomatic subscript assignment instead of calling
            # dict.__setitem__ directly
            self.attrs[str(k)] = str(v)
        self.children = []

    def __str__(self):
        # recursive dump: this node, then each child on its own line(s)
        result = " %s %s \"%s\"\n" % (self.name, self.attrs, self.text)
        for child in self.children:
            result += str(child)
        return result
def coroutine(func):
    """
    _coroutine_
    Decorator method used to prime coroutines

    Calls next() once on the freshly created generator so callers can
    immediately .send() into it without priming it themselves.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped generator's name/docstring
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        next(cr)
        return cr
    return start
def xmlFileToNode(reportFile):
    """
    _xmlFileToNode_
    Use expat and the build coroutine to parse the XML file and build
    a node structure

    :param reportFile: path of the XML file to parse
    :returns: the root Node ("JobReports") with the parsed tree attached
    """
    node = Node("JobReports", {})
    # Bug fix: the file handle was opened inline and never closed;
    # use a context manager so it is released even if parsing fails.
    with open(reportFile, 'rb') as xmlFile:
        expat_parse(xmlFile, build(node))
    return node
def expat_parse(f, target):
    """
    _expat_parse_
    Expat based XML parsing that feeds a node building coroutine

    Forwards each parser callback to ``target.send()`` as a
    (kind, payload) tuple: ('start', (name, attrs)), ('end', name)
    or ('text', data).
    """
    parser = xml.parsers.expat.ParserCreate()
    # coalesce adjacent character-data chunks into single 'text' events
    parser.buffer_text = True
    parser.StartElementHandler = lambda name, attrs: target.send(('start', (name, attrs)))
    parser.EndElementHandler = lambda name: target.send(('end', name))
    parser.CharacterDataHandler = lambda data: target.send(('text', data))
    parser.ParseFile(f)
@coroutine
def build(topNode):
    """
    _build_
    Node structure builder that is fed from the expat_parse method

    Maintains a stack of open elements; 'start' pushes a new Node,
    'text' accumulates character data, and 'end' joins/strips the
    accumulated text onto the closing node and pops it.
    """
    openNodes = [topNode]
    pendingText = []
    while True:
        event, payload = (yield)
        if event == "start":
            pendingText = []
            child = Node(payload[0], payload[1])
            openNodes[-1].children.append(child)
            openNodes.append(child)
        elif event == "text":
            pendingText.append(payload)
        else:  # "end"
            openNodes[-1].text = str(''.join(pendingText)).strip()
            openNodes.pop()
            pendingText = []
from __future__ import print_function, division
from builtins import str, range
import math
import decimal
import logging
from WMCore.WMException import WMException
class MathAlgoException(WMException):
    """
    Some simple math algo exceptions

    Raised by the helpers in this module, e.g. when non-numerical
    values are passed where numbers are required.
    """
    pass
def getAverageStdDev(numList):
    """
    _getAverageStdDev_

    Given a list, calculate both the average and the (population)
    standard deviation, skipping NaN/Inf entries.

    :param numList: list of int/float values
    :returns: tuple (average, stdDev); (0.0, 0.0) for an empty list or
        a list containing only NaN/Inf values
    :raises MathAlgoException: if a non-numerical value is encountered
    """
    # Bug fix: the guard used to be `len(numList) < 0`, which is never
    # true; test for an actually empty list.
    if not numList:
        # Nothing to do here
        return 0.0, 0.0

    total = 0.0
    average = 0.0
    stdBase = 0.0

    # Assemble the average, skipping non-finite entries
    skipped = 0
    for value in numList:
        try:
            if math.isnan(value) or math.isinf(value):
                skipped += 1
                continue
            total += value
        except TypeError:
            msg = "Attempted to take average of non-numerical values.\n"
            msg += "Expected int or float, got %s: %s" % (value.__class__, value)
            logging.error(msg)
            logging.debug("FullList: %s", numList)
            raise MathAlgoException(msg)

    length = len(numList) - skipped
    if length < 1:
        # every entry was NaN/Inf
        return average, total
    average = total / length

    for value in numList:
        # Bug fix: NaN/Inf entries were skipped for the average but
        # still fed into the deviation sum, poisoning stdDev; apply the
        # same skip policy here.
        if math.isnan(value) or math.isinf(value):
            continue
        tmpValue = value - average
        stdBase += (tmpValue * tmpValue)

    stdDev = math.sqrt(stdBase / length)

    if math.isnan(average) or math.isinf(average):
        average = 0.0
    # Bug fix: this check used isinf(average), which is always False at
    # this point (average was just sanitised above); check stdDev.
    if math.isnan(stdDev) or math.isinf(stdDev) or not decimal.Decimal(str(stdDev)).is_finite():
        stdDev = 0.0
    if not isinstance(stdDev, (int, float)):
        stdDev = 0.0

    return average, stdDev
def createHistogram(numList, nBins, limit):
    """
    _createHistogram_
    Create a histogram proxy (a list of bins) for a
    given list of numbers

    :param numList: list of numbers to histogram
    :param nBins: number of standard bins to create
    :param limit: events farther than limit*stdDev from the average go
        into underflow/overflow bins instead of the standard range
    :returns: list of bin dicts; possible 'underflow'/'overflow' bins
        first, then nBins 'standard' bins with edges/average/stdDev
    """
    average, stdDev = getAverageStdDev(numList = numList)

    underflow = []
    overflow = []
    histEvents = []
    histogram = []

    # Partition events: within limit*stdDev of the average -> standard
    # bins; above -> overflow; below -> underflow.
    for value in numList:
        if math.fabs(average - value) <= limit * stdDev:
            # Then we counted this event
            histEvents.append(value)
        elif average < value:
            overflow.append(value)
        elif average > value:
            underflow.append(value)

    if len(underflow) > 0:
        binAvg, binStdDev = getAverageStdDev(numList=underflow)
        histogram.append({'type': 'underflow',
                          'average': binAvg,
                          'stdDev': binStdDev,
                          'nEvents': len(underflow)})
    if len(overflow) > 0:
        binAvg, binStdDev = getAverageStdDev(numList=overflow)
        histogram.append({'type': 'overflow',
                          'average': binAvg,
                          'stdDev': binStdDev,
                          'nEvents': len(overflow)})

    if len(histEvents) < 1:
        # Nothing to do?
        return histogram

    # sorted order lets the per-bin loop below stop early
    histEvents.sort()
    upperBound = max(histEvents)
    lowerBound = min(histEvents)
    if lowerBound == upperBound:
        # This is a problem
        # degenerate range: widen it by +/-1 and use a single bin
        logging.debug("Only one value in the histogram!")
        nBins = 1
        upperBound = upperBound + 1
        lowerBound = lowerBound - 1
    binSize = (upperBound - lowerBound)/nBins
    binSize = floorTruncate(binSize)

    # create the empty standard bins with truncated lower edges
    for x in range(nBins):
        lowerEdge = floorTruncate(lowerBound + (x * binSize))
        histogram.append({'type': 'standard',
                          'lowerEdge': lowerEdge,
                          'upperEdge': lowerEdge + binSize,
                          'average': 0.0,
                          'stdDev': 0.0,
                          'nEvents': 0})

    # fill each standard bin from the sorted event list
    for bin_ in histogram:
        if bin_['type'] != 'standard':
            continue
        binList = []
        # NOTE(review): both edges are inclusive here, so a value that
        # lands exactly on a shared edge can be counted in two adjacent
        # bins - confirm whether that is intended.
        for value in histEvents:
            if value >= bin_['lowerEdge'] and value <= bin_['upperEdge']:
                # Then we're in the bin
                binList.append(value)
            elif value > bin_['upperEdge']:
                # Because this is a sorted list we are now out of the bin range
                # Calculate our values and break
                break
            else:
                continue

        # If we get here, it's because we're out of values in the bin
        # Time to do some math
        if len(binList) < 1:
            # Nothing to do here, leave defaults
            continue

        binAvg, binStdDev = getAverageStdDev(numList=binList)
        bin_['average'] = binAvg
        bin_['stdDev'] = binStdDev
        bin_['nEvents'] = len(binList)

    return histogram
def floorTruncate(value, precision=3):
    """
    _floorTruncate_
    Truncate ``value`` downward to ``precision`` decimal places.

    Always truncates to a LOWER value (floor, not round), so that using
    it for histogram binning creates edges at or beneath the histogram
    lower edge.
    """
    scale = math.pow(10, precision)
    return math.floor(value * scale) / scale
def sortDictionaryListByKey(dictList, key, reverse=False):
    """
    _sortDictionaryListByKey_
    Given a list of dictionaries and a key with a numerical
    value, sort that dictionary in order of that key's value.

    NOTE: If the key does not exist, this will not raise an exception;
    missing keys sort as 0.0. This is because it is used for sorting of
    performance histograms and not all histograms have the same value.
    """
    def numericValue(entry):
        # missing keys default to 0.0 so heterogeneous dicts still sort
        return float(entry.get(key, 0.0))
    return sorted(dictList, key=numericValue, reverse=reverse)
def getLargestValues(dictList, key, n=1):
    """
    _getLargestValues_
    Take a list of dictionaries, sort them by the value of a
    particular key, and return the n largest entries.
    Key must be a numerical key; missing keys count as 0.0.
    """
    # sort descending by the numerical value of `key`, then slice
    ordered = sorted(dictList,
                     key=lambda entry: float(entry.get(key, 0.0)),
                     reverse=True)
    return ordered[:n]
def validateNumericInput(value):
    """
    _validateNumericInput_
    Check that the value is actually an usable number.

    :param value: candidate value (number or numeric string)
    :returns: True if value converts to a finite float, False otherwise
    """
    # Bug fix: float(value) used to sit outside the try block, so a
    # non-numeric string raised ValueError instead of returning False,
    # and the `except TypeError` below it was unreachable.
    try:
        value = float(value)
        if math.isnan(value) or math.isinf(value):
            return False
    except (TypeError, ValueError):
        return False
    return True
def calculateRunningAverageAndQValue(newPoint, n, oldM, oldQ):
    """
    _calculateRunningAverageAndQValue_
    Use the algorithm described in:
    Donald E. Knuth (1998). The Art of Computer Programming, volume 2:
    Seminumerical Algorithms, 3rd ed., p. 232. Boston: Addison-Wesley.

    Incrementally update a running average M and the Q accumulator
    while collecting data; the standard deviation follows from
    sigma = sqrt(Q/n) (see calculateStdDevFromQ).

    :raises MathAlgoException: on any non-usable numeric input
    """
    if not validateNumericInput(newPoint):
        raise MathAlgoException("Provided a non-valid newPoint")
    if not validateNumericInput(n):
        raise MathAlgoException("Provided a non-valid n")

    if n == 1:
        # first data point: average is the point itself, no spread yet
        return newPoint, 0.0

    if not validateNumericInput(oldM):
        raise MathAlgoException("Provided a non-valid oldM")
    if not validateNumericInput(oldQ):
        raise MathAlgoException("Provided a non-valid oldQ")

    delta = newPoint - oldM
    M = oldM + delta / n
    Q = oldQ + ((n - 1) * delta * delta / n)
    return M, Q
def calculateStdDevFromQ(Q, n):
    """
    _calculateStdDevFromQ_
    If Q is the sum of the squared differences of some points to their
    average, then the standard deviation is given by sigma = sqrt(Q/n).

    :returns: sigma, or 0.0 if the result is not a usable number
    :raises MathAlgoException: on non-usable Q or n
    """
    if not validateNumericInput(Q):
        raise MathAlgoException("Provided a non-valid Q")
    if not validateNumericInput(n):
        raise MathAlgoException("Provided a non-valid n")

    sigma = math.sqrt(Q / n)
    # collapse NaN/Inf results to 0.0 rather than propagating them
    return sigma if validateNumericInput(sigma) else 0.0
from builtins import str as newstr
import random, cherrypy
class RESTError(Exception):
    """Base class for REST errors.

    .. attribute:: http_code

       Integer, HTTP status code for this error; also emitted as the
       X-Error-HTTP header value.

    .. attribute:: app_code

       Integer, application error code, emitted as X-REST-Status.

    .. attribute:: message

       String describing the error, emitted as X-Error-Detail. Must not
       contain anything sensitive - in particular no unvalidated input
       or database data. Normally a fixed label matching :obj:`app_code`
       one-to-one. Truncated to 200 characters when emitted; being an
       HTTP header it cannot contain newlines or encoding-dependent text.

    .. attribute:: info

       String with additional free-form detail beyond :obj:`message`,
       emitted as X-Error-Info. Same safety and length constraints as
       :obj:`message`.

    .. attribute:: errid

       Random unique identifier for this error, emitted as X-Error-ID
       and written to the server logs; clients can quote it so a
       specific error can be located (e.g. grepped) in the logs.

    .. attribute:: errobj

       The original exception object when this error wraps another one.
       Logged server-side only; never returned to the HTTP client.

    .. attribute:: trace

       Stack trace from :func:`format_exc`; logged server-side only.
    """
    http_code = None
    app_code = None
    message = None
    info = None
    errid = None
    errobj = None
    trace = None

    def __init__(self, info=None, errobj=None, trace=None):
        # 128 random bits rendered as a 32-character hex id
        self.errid = "%032x" % random.randrange(1 << 128)
        self.info = info
        self.errobj = errobj
        self.trace = trace

    def __str__(self):
        def flat(obj):
            # single-line repr so the message fits one log line
            return repr(obj).replace("\n", " ~~ ")
        return "%s %s [HTTP %d, APP %d, MSG %s, INFO %s, ERR %s]" \
               % (self.__class__.__name__, self.errid, self.http_code,
                  self.app_code, flat(self.message), flat(self.info),
                  flat(self.errobj))
class NotAcceptable(RESTError):
    "Client did not specify format it accepts, or no compatible format was found."
    # 406 Not Acceptable; application code 201
    http_code = 406
    app_code = 201
    message = "Not acceptable"
class UnsupportedMethod(RESTError):
    "Client used HTTP request method which isn't supported for any API call."
    # 405 Method Not Allowed; application code 202
    http_code = 405
    app_code = 202
    message = "Request method not supported"
class MethodWithoutQueryString(RESTError):
    "Client provided a query string which isn't acceptable for this request method."
    # 405 Method Not Allowed; application code 203
    http_code = 405
    app_code = 203
    message = "Query arguments not supported for this request method"
class APIMethodMismatch(RESTError):
    """Both the API and HTTP request methods are supported, but not in that
    combination."""
    # 405 Method Not Allowed; application code 204
    http_code = 405
    app_code = 204
    message = "API not supported for this request method"
class APINotSpecified(RESTError):
    "The request URL is missing API argument."
    # 400 Bad Request; application code 205
    http_code = 400
    app_code = 205
    message = "API not specified"
class NoSuchInstance(RESTError):
    """The request URL is missing instance argument or the specified instance
    does not exist."""
    # 404 Not Found; application code 206
    http_code = 404
    app_code = 206
    message = "No such instance"
class APINotSupported(RESTError):
    "The request URL provides wrong API argument."
    # 404 Not Found; application code 207
    http_code = 404
    app_code = 207
    message = "API not supported"
class DataCacheEmpty(RESTError):
    "The wmstats data cache has not been created."
    # 503 Service Unavailable; application code 208
    http_code = 503
    app_code = 208
    message = "DataCache is Empty"
class DatabaseError(RESTError):
    """Parent class for database-related errors.

    .. attribute: lastsql

       A tuple of *(sql, binds, kwbinds),* where `sql` is the last SQL statement
       executed and `binds`, `kwbinds` are the bind values used with it. Any
       sensitive parts like passwords have already been censored from the `sql`
       string. Note that for massive requests `binds` or `kwbinds` can get large.
       These are logged out in the server logs when reporting the error, but no
       information about these are returned to the HTTP client.

    .. attribute: instance

       String, the database instance for which the error occurred. This is
       reported in the error message output to server logs, but no information
       about this is returned to the HTTP client."""
    lastsql = None
    instance = None

    def __init__(self, info = None, errobj = None, trace = None,
                 lastsql = None, instance = None):
        # record the SQL/bind context on top of the generic REST error
        RESTError.__init__(self, info, errobj, trace)
        self.lastsql = lastsql
        self.instance = instance
class DatabaseUnavailable(DatabaseError):
    """The instance argument is correct, but cannot connect to the database.
    This error will only occur at initial attempt to connect to the database,
    :class:`~.DatabaseConnectionError` is raised instead if the connection
    ends prematurely after the transaction has already begun successfully."""
    # 503 Service Unavailable; application code 401
    http_code = 503
    app_code = 401
    message = "Database unavailable"
class DatabaseConnectionError(DatabaseError):
    """Database was available when the operation started, but the connection
    was lost or otherwise failed during the application operation."""
    # 504 Gateway Timeout; application code 402
    http_code = 504
    app_code = 402
    message = "Database connection failure"
class DatabaseExecutionError(DatabaseError):
    """Database operation failed."""
    # 500 Internal Server Error; application code 403
    # NOTE(review): app_code 403 is shared with ExecutionError below -
    # confirm whether clients can distinguish the two.
    http_code = 500
    app_code = 403
    message = "Execution error"
class MissingParameter(RESTError):
    "Client did not supply a parameter which is required."
    # 400 Bad Request; application code 301
    http_code = 400
    app_code = 301
    message = "Missing required parameter"
class InvalidParameter(RESTError):
    "Client supplied invalid value for a parameter."
    # 400 Bad Request; application code 302
    http_code = 400
    app_code = 302
    message = "Invalid input parameter"
class MissingObject(RESTError):
    """An object required for the operation is missing. This might be a
    pre-requisite needed to create a reference, or attempt to delete
    an object which does not exist."""
    # 400 Bad Request; application code 303
    http_code = 400
    app_code = 303
    message = "Required object is missing"
class TooManyObjects(RESTError):
    """Too many objects matched specified criteria. Usually this means
    more than one object was matched, deleted, or inserted, when only
    exactly one should have been subject to the operation."""
    # 400 Bad Request; application code 304
    http_code = 400
    app_code = 304
    message = "Too many objects"
class ObjectAlreadyExists(RESTError):
    """An already existing object is on the way of the operation. This
    is usually caused by uniqueness constraint violations when creating
    new objects."""
    # 400 Bad Request; application code 305
    http_code = 400
    app_code = 305
    message = "Object already exists"
class InvalidObject(RESTError):
    "The specified object is invalid."
    # 400 Bad Request; application code 306
    http_code = 400
    app_code = 306
    message = "Invalid object"
class ExecutionError(RESTError):
    """Input was in principle correct but there was an error processing
    the request. This normally means either programming error, timeout, or
    an unusual and unexpected problem with the database. For security reasons
    little additional information is returned. If the problem persists, client
    should contact service operators. The returned error id can be used as a
    reference."""
    # 500 Internal Server Error; application code 403
    # NOTE(review): app_code 403 duplicates DatabaseExecutionError's.
    http_code = 500
    app_code = 403
    message = "Execution error"
def report_error_header(header, val):
    """If `val` is non-empty, set CherryPy response `header` to `val`.

    Replaces all newlines with "; " characters. If the resulting value
    is longer than 200 characters, truncates it to the first 197
    characters and leaves a trailing ellipsis "..."."""
    if not val:
        return
    # headers cannot carry newlines; flatten to a single line
    val = val.replace("\n", "; ")
    if len(val) > 200:
        val = val[:197] + "..."
    cherrypy.response.headers[header] = val
def report_rest_error(err, trace, throw):
    """Report a REST error: generate an appropriate log message, set the
    response headers and raise an appropriate :class:`~.HTTPError`.
    Normally `throw` would be True to translate the exception `err` into
    a HTTP server error, but the function can also be called with `throw`
    set to False if the purpose is merely to log an exception message.
    :arg err: exception object.
    :arg trace: stack trace to use in case `err` doesn't have one.
    :arg throw: raise a :class:`~.HTTPError` if True."""
    # Branch 1: database errors wrapping a driver-level exception; log
    # the censored SQL and bind values server-side only.
    if isinstance(err, DatabaseError) and err.errobj:
        offset = None
        sql, binds, kwbinds = err.lastsql
        if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
            # driver reported the failing position inside the SQL text;
            # mark it with "<**>" for the log
            offset = err.errobj.args[0].offset
            sql = sql[:offset] + "<**>" + sql[offset:]
        cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
                     " last statement: %s; binds: %s, %s; offset: %s"
                     % (err.http_code, err.app_code, err.message,
                        getattr(err.errobj, "__module__", "__builtins__"),
                        err.errobj.__class__.__name__,
                        err.errid, err.instance, newstr(err.errobj).rstrip(),
                        sql, binds, kwbinds, offset))
        # each trace line gets its own (timestamped) log entry
        for line in err.trace.rstrip().split("\n"): cherrypy.log("  " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    # Branch 2: any other REST error; log derivation chain if wrapped.
    elif isinstance(err, RESTError):
        if err.errobj:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message,
                            getattr(err.errobj, "__module__", "__builtins__"),
                            err.errobj.__class__.__name__,
                            newstr(err.errobj).rstrip()))
            # prefer the trace captured on the error itself
            trace = err.trace
        else:
            cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
                         % (err.__module__, err.__class__.__name__,
                            err.errid, err.message))
        for line in trace.rstrip().split("\n"): cherrypy.log("  " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
        cherrypy.response.headers["X-Error-ID"] = err.errid
        report_error_header("X-Error-Detail", err.message)
        report_error_header("X-Error-Info", err.info)
        if throw: raise cherrypy.HTTPError(err.http_code, err.message)
    # Branch 3: pre-existing CherryPy HTTPError; re-raise as-is.
    elif isinstance(err, cherrypy.HTTPError):
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
                     % (err.__module__, err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log("  " + line)
        cherrypy.response.headers["X-REST-Status"] = newstr(200)
        cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", err._message)
        if throw: raise err
    # Branch 4: anything else becomes a generic 500 "Server error".
    else:
        errid = "%032x" % random.randrange(1 << 128)
        cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
                     % (getattr(err, "__module__", "__builtins__"),
                        err.__class__.__name__,
                        errid, newstr(err).rstrip()))
        for line in trace.rstrip().split("\n"): cherrypy.log("  " + line)
        # NOTE(review): unlike the branches above, these two headers are
        # set as bare ints, not newstr() - confirm this is intentional.
        cherrypy.response.headers["X-REST-Status"] = 400
        cherrypy.response.headers["X-Error-HTTP"] = 500
        cherrypy.response.headers["X-Error-ID"] = errid
        report_error_header("X-Error-Detail", "Server error")
        if throw: raise cherrypy.HTTPError(500, "Server error")
from builtins import str as newstr, bytes as newbytes
from WMCore.REST.Error import *
import math
import re
import numbers
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3, PY2
def return_message(main_err, custom_err):
    """Pick the error message to report: the caller-supplied custom
    message when one was given (truthy), otherwise the default."""
    return custom_err if custom_err else main_err
def _arglist(argname, kwargs):
    """Return the value of ``argname`` in ``kwargs`` normalised to a
    list: [] when absent, the value itself when already a list, and a
    one-element list otherwise."""
    val = kwargs.get(argname, None)
    # idiom fix: identity comparison for None instead of ==
    if val is None:
        return []
    elif not isinstance(val, list):
        return [val]
    else:
        return val
def _check_rx(argname, val, custom_err=None):
    """Compile ``val`` into a regular expression object.

    :raises InvalidParameter: if ``val`` is not a string/bytes or does
        not compile into a valid regexp
    """
    if not isinstance(val, (newstr, newbytes)):
        raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
    try:
        return re.compile(val)
    # bug fix: the bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; only compilation failures should be converted
    except re.error:
        raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
def _check_str(argname, val, rx, custom_err = None):
    """
    This is not really check val is ASCII.
    2021 09: we are now using version 17.4.0 -> we do not need to convert to
    bytes here anymore, we are using a recent verison of cherrypy.
    We merged the funcionality of _check_str and _check_ustr into a single function
    :type val: str or bytes (only utf8 encoded string) in py3, unicode or str in py2
    :type rx: regex, compiled from native str (unicode in py3, bytes in py2)
    :returns: the native-str value when it matches ``rx``
    :raises InvalidParameter: when ``val`` is not a native str after
        conversion or does not match ``rx``
    """
    # normalise to the "native str" of the running interpreter:
    # decode bytes->unicode on py3, encode unicode->bytes on py2
    val = decodeBytesToUnicodeConditional(val, condition=PY3)
    val = encodeUnicodeToBytesConditional(val, condition=PY2)
    # `val` should now be a "native str" (unicode in py3, bytes in py2)
    # here str has not been redefined. it is default `str` in both py2 and py3.
    if not isinstance(val, str) or not rx.match(val):
        raise InvalidParameter(return_message("Incorrect '%s' parameter %s %s" % (argname, type(val), val), custom_err))
    return val
def _check_num(argname, val, bare, minval, maxval, custom_err=None):
    """Convert ``val`` to an int and range-check it.

    :param bare: if True, string input must be a pure digit sequence
    :param minval: lower bound, or None for unbounded
    :param maxval: upper bound, or None for unbounded
    :raises InvalidParameter: on type, conversion, or range failure
    """
    if not isinstance(val, numbers.Integral) and (not isinstance(val, (newstr, newbytes)) or (bare and not val.isdigit())):
        raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
    try:
        n = int(val)
        # idiom fix: identity comparison for None
        if (minval is not None and n < minval) or (maxval is not None and n > maxval):
            raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
        return n
    except InvalidParameter:
        raise
    # bug fix: narrow the bare `except:` to the conversion failures
    # int() can actually raise, instead of swallowing everything
    except (TypeError, ValueError):
        raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _check_real(argname, val, special, minval, maxval, custom_err=None):
    """Convert ``val`` to a float and range-check it.

    :param special: if False, NaN and +/-Inf are rejected
    :param minval: lower bound, or None for unbounded
    :param maxval: upper bound, or None for unbounded
    :raises InvalidParameter: on type, conversion, or range failure
    """
    if not isinstance(val, numbers.Number) and not isinstance(val, (newstr, newbytes)):
        raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
    try:
        n = float(val)
        if not special and (math.isnan(n) or math.isinf(n)):
            raise InvalidParameter(return_message("Parameter '%s' improper value" % argname, custom_err))
        # idiom fix: identity comparison for None
        if (minval is not None and n < minval) or (maxval is not None and n > maxval):
            raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
        return n
    except InvalidParameter:
        raise
    # bug fix: narrow the bare `except:` to float()'s real failure modes
    except (TypeError, ValueError):
        raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _validate_one(argname, param, safe, checker, optional, *args):
    """Validate a single occurrence of ``argname`` from ``param.kwargs``
    with ``checker`` and move the (validated) value into ``safe.kwargs``.

    When ``optional`` is True and the argument is absent/None, None is
    stored instead of running the checker.
    """
    val = param.kwargs.get(argname, None)
    if optional and val is None:
        safe.kwargs[argname] = None
    else:
        safe.kwargs[argname] = checker(argname, val, *args)
    # bug fix: bare `del param.kwargs[argname]` raised KeyError when an
    # optional argument was absent, contradicting the documented
    # contract of the validate_* wrappers; pop() tolerates a missing key
    param.kwargs.pop(argname, None)
def _validate_all(argname, param, safe, checker, *args):
    """Validate every occurrence of ``argname`` from ``param.kwargs``
    with ``checker`` and store the resulting list (possibly empty) in
    ``safe.kwargs``, removing the originals."""
    values = _arglist(argname, param.kwargs)
    safe.kwargs[argname] = [checker(argname, item, *args) for item in values]
    # pop() is a no-op when the argument was never supplied
    param.kwargs.pop(argname, None)
def validate_rx(argname, param, safe, optional = False, custom_err = None):
    """Validates that an argument is a valid regexp.
    Checks that an argument named `argname` exists in `param.kwargs`,
    and it a string which compiles into a python regular expression.
    If successful, the regexp object (not the string) is copied into
    `safe.kwargs` and the string value is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # delegate to the shared single-value driver with the regexp checker
    _validate_one(argname, param, safe, _check_rx, optional, custom_err)
def validate_str(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp.
    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.
    Accepts both unicode strings and utf8-encoded bytes strings as argument
    string.
    Accepts regex compiled only with "native strings", which means str in both
    py2 and py3 (unicode in py3, bytes of utf8-encoded strings in py2)
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # delegate to the shared single-value driver with the string checker
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_ustr(argname, param, safe, rx, optional = False, custom_err = None):
    """Validates that an argument is a string and matches a regexp,
    During the py2->py3 modernization, _check_str and _check_ustr have been
    merged into a single function called _check_str.
    This function is now the same as validate_str, but is kept nonetheless
    not to break our client's code.
    Checks that an argument named `argname` exists in `param.kwargs`
    and it is a string which matches regular expression `rx`. If
    successful the string is copied into `safe.kwargs` and the value
    is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception."""
    # identical to validate_str since the py2->py3 checker merge
    _validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_num(argname, param, safe, optional = False,
                 bare = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid integer number.
    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is an int or a string convertible to a valid number. If successful
    the integer value (not the string) is copied into `safe.kwargs`
    and the original int/string value is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.
    If `bare` is True, the number is required to be a pure digit sequence if it is a string.
    Otherwise anything accepted by `int(val)` is acceted, including for
    example leading white space or sign. Note that either way arbitrarily
    large values are accepted; if you want to prevent abuse against big
    integers, use the `minval` and `maxval` thresholds described below,
    or check the length the of the string against some limit first.
    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    # delegate to the shared single-value driver with the integer checker
    _validate_one(argname, param, safe, _check_num, optional, bare, minval, maxval, custom_err)
def validate_real(argname, param, safe, optional = False,
                  special = False, minval = None, maxval = None, custom_err = None):
    """Validates that an argument is a valid real number.
    Checks that an argument named `argname` exists in `param.kwargs`,
    and it is float number or a string convertible to a valid number. If successful
    the float value (not the string) is copied into `safe.kwargs`
    and the original float/string value is removed from `param.kwargs`.
    If `optional` is True, the argument is not required to exist in
    `param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
    a missing value raises an exception.
    Anything accepted by `float(val)` is accepted, including for example
    leading white space, sign and exponent. However NaN and +/- Inf are
    rejected unless `special` is True.
    If `minval` or `maxval` are given, values less than or greater than,
    respectively, the threshold are rejected."""
    # delegate to the shared single-value driver with the float checker
    _validate_one(argname, param, safe, _check_real, optional, special, minval, maxval, custom_err)
def validate_rxlist(argname, param, safe, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    can be compiled into a python regexp object.
    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which compiles into a regular expression.
    If successful the array is copied into `safe.kwargs` and the value is
    removed from `param.kwargs`. The value always becomes an array in
    `safe.kwargs`, even if no or only one argument was provided.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # delegate to the shared multi-value driver with the regexp checker
    _validate_all(argname, param, safe, _check_rx, custom_err)
def validate_strlist(argname, param, safe, rx, custom_err = None):
    """Validates that an argument is an array of strings, each of which
    matches a regexp.
    Checks that an argument named `argname` is either a single string or
    an array of strings, each of which matches the regular expression
    `rx`. If successful the array is copied into `safe.kwargs` and the
    value is removed from `param.kwargs`. The value always becomes an
    array in `safe.kwargs`, even if no or only one argument was provided.
    Use `validate_ustrlist` instead if the argument string might need
    to be converted from utf-8 into unicode first. Use this method only
    for inputs which are meant to be bare strings.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    # delegate to the shared multi-value driver with the string checker
    _validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_ustrlist(argname, param, safe, rx, custom_err = None):
    """Check that `argname` holds one or more strings, each of which
    matches the regular expression `rx` after conversion from utf-8
    into unicode.

    The parameter may be given either as a single string or as an array
    of strings. On success the values are stored as an array under
    `safe.kwargs` and removed from `param.kwargs`. The result is always
    an array, even for zero or one input value; an empty array simply
    means there were no `argname` parameters at all in `param.kwargs`.

    Everything is converted to unicode and input must be valid utf-8;
    use `validate_strlist` instead when the values should always remain
    bare strings and those constraints are inappropriate."""
    _validate_all(argname, param, safe, _check_ustr, rx, custom_err)
def validate_numlist(argname, param, safe, bare=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of integers, as checked by
    `validate_num()`.
    Checks that an argument named `argname` is either a single string/int or
    an array of strings/int, each of which validates with `validate_num` and
    `bare`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    _validate_all(argname, param, safe, _check_num, bare, minval, maxval, custom_err)
def validate_reallist(argname, param, safe, special=False, minval=None, maxval=None, custom_err = None):
    """Validates that an argument is an array of real numbers, as checked by
    `validate_real()`.
    Checks that an argument named `argname` is either a single string/float or
    an array of strings/floats, each of which validates with `validate_real` and
    `special`, `minval` and `maxval` arguments. If successful the array is
    copied into `safe.kwargs` and the value is removed from `param.kwargs`.
    The value always becomes an array in `safe.kwargs`, even if no or only
    one argument was provided.
    Note that an array of zero length is accepted, meaning there were no
    `argname` parameters at all in `param.kwargs`."""
    _validate_all(argname, param, safe, _check_real, special, minval, maxval, custom_err)
def validate_no_more_input(param):
    """Ensure all input has been consumed: both `param.args` and
    `param.kwargs` must be empty, otherwise raise `InvalidParameter`.
    Path arguments are checked before keyword arguments."""
    if param.args:
        raise InvalidParameter("Excess path arguments, not validated args='%s'" % param.args)
    if param.kwargs:
        raise InvalidParameter("Excess keyword arguments, not validated kwargs='%s'" % param.kwargs)
def validate_lengths(safe, *names):
    """Verify that each of `names` is present in `safe.kwargs` as a list
    and that all of those lists have equal length. Convenience check for
    APIs accepting several parallel multi-valued parameters which must
    receive the same number of values for each one."""
    refname = names[0]
    reflen = None
    for nm in names:
        vals = safe.kwargs.get(nm)
        if not isinstance(vals, list):
            raise InvalidParameter("Incorrect '%s' parameter" % nm)
        if reflen is None:
            # First name acts as the reference for all length comparisons.
            reflen = len(vals)
        elif len(vals) != reflen:
            raise InvalidParameter("Mismatched number of arguments: %d %s vs. %d %s"
                                   % (reflen, refname, len(vals), nm))
from __future__ import print_function
import gzip
from builtins import str, bytes, object
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytes, encodeUnicodeToBytesConditional
from future.utils import viewitems
import hashlib
import json
import xml.sax.saxutils
import zlib
from traceback import format_exc
import cherrypy
from WMCore.REST.Error import RESTError, ExecutionError, report_rest_error
try:
from cherrypy.lib import httputil
except ImportError:
from cherrypy.lib import http as httputil
def vary_by(header):
    """Append `header` to the response 'Vary' header, preserving any
    entries already present and avoiding duplicates."""
    current = cherrypy.response.headers.get('Vary', '')
    entries = [v.strip() for v in current.split(",") if v.strip()]
    if header not in entries:
        entries.append(header)
    cherrypy.response.headers['Vary'] = ", ".join(entries)
def is_iterable(obj):
    """Return True if `obj` can be iterated over, False otherwise."""
    try:
        iter(obj)
        return True
    except TypeError:
        return False
class RESTFormat(object):
    """Base class for response formatters. Concrete subclasses provide
    `stream_chunked()` and may override `chunk_args()` to supply any
    preamble/trailer arguments their format needs."""

    def __call__(self, stream, etag):
        """Produce formatted output for `stream`, using the `etag` object
        to accumulate the ETag header value. Returns a generator that
        yields a verbatim copy of the `stream` items plus any preamble
        and trailer the selected format requires; the caller typically
        uses it for chunked HTTP transfer encoding, or for a simple
        result such as an image."""
        # Normalise 'stream' into an iterable of chunks. None becomes a
        # single empty chunk and plain strings/bytes a one-element list;
        # small responses may later be collapsed back to a single string
        # by the final stream consumer.
        if stream is None:
            stream = ['']
        elif isinstance(stream, (str, bytes)):
            stream = [stream]
        elif hasattr(stream, "read"):
            # File-like objects (detected via .read(), since types.FileType
            # no longer exists in python3) are wrapped by cherrypy's
            # file_generator into 512kB chunks.
            stream = cherrypy.lib.file_generator(stream, 512 * 1024)
        return self.stream_chunked(stream, etag, *self.chunk_args(stream))

    def chunk_args(self, stream):
        """Extra positional arguments to pass to `stream_chunked()`.
        None by default; override in subclasses whose `stream_chunked()`
        takes preamble or trailer arguments."""
        return ()
class XMLFormat(RESTFormat):
    """Format an iterable of objects into XML encoded in UTF-8.
    Generates normally first a preamble, a stream of XML-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.
    Outputs first a preamble, then XML encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.
    The ETag generation is deterministic only if iterating over input is
    deterministic. Beware in particular the key order for a dict is
    arbitrary and may differ for two semantically identical dicts.
    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.
    The output is generated as an XML document whose top-level entity name
    is defined by the label given at the formatter construction time. The
    caller must define ``cherrypy.request.rest_generate_data`` to element
    name for wrapping stream contents. Usually the top-level entity is the
    application name and the ``cherrypy.request.rest_generate_data`` is
    ``result``.
    Iterables are output as ``<array><i>ITEM</i><i>ITEM</i></array>``,
    dictionaries as ``<dict><key>KEY</key><value>VALUE</value></dict>``.
    `None` is output as empty contents, and hence there is no way to
    distinguish `None` and an empty string from each other. Scalar types
    are output as rendered by `str()`, but obviously XML encoding unsafe
    characters. This class does not support formatting arbitrary types.
    The formatter does not insert any spaces into the output. Although the
    output is generated as a preamble, stream of objects, and trailer just
    like by the `JSONFormatter`, each of which is a separate HTTP transfer
    chunk, the output does *not* have guaranteed line-oriented structure
    like the `JSONFormatter` produces. Note in particular that if the data
    stream contains strings with newlines, the output will have arbitrary
    line structure. On the other hand, as the output is well-formed XML,
    virtually all SAX processors can read the stream incrementally even if
    the client isn't able to fully preserve chunked HTTP transfer encoding."""

    def __init__(self, label):
        # Name of the top-level XML entity wrapping the whole response.
        self.label = label

    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into an XML fragment, returned as `str`.

        Scalars are escaped with `xml.sax.saxutils.escape`; bytes input
        is assumed to be utf-8 and decoded first. Dicts and other
        iterables are rendered recursively. Raises `ExecutionError` for
        types which cannot be represented.

        Fix vs. previous revision: fragments are now consistently `str`.
        The old code called ``escape(...).encode('utf-8')`` and then
        %s-formatted the resulting bytes into str markup, which in
        python3 produced literal ``b'...'`` artifacts in the output, and
        it passed bytes straight to ``escape()`` which raises TypeError
        in python3."""
        if isinstance(obj, type(None)):
            result = ""
        elif isinstance(obj, str):
            result = xml.sax.saxutils.escape(obj)
        elif isinstance(obj, bytes):
            # Decode before escaping: escape() operates on str and the
            # fragment must concatenate with the surrounding str markup.
            result = xml.sax.saxutils.escape(obj.decode("utf-8"))
        elif isinstance(obj, (int, float, bool)):
            result = xml.sax.saxutils.escape(str(obj))
        elif isinstance(obj, dict):
            result = "<dict>"
            for k, v in viewitems(obj):
                result += "<key>%s</key><value>%s</value>" % \
                          (xml.sax.saxutils.escape(k),
                           XMLFormat.format_obj(v))
            result += "</dict>"
        elif is_iterable(obj):
            result = "<array>"
            for v in obj:
                result += "<i>%s</i>" % XMLFormat.format_obj(v)
            result += "</array>"
        else:
            # type(obj).__name__ (the old type(obj).__class__.__name__
            # always printed 'type', hiding the offending type).
            cherrypy.log("cannot represent object of type %s in xml (%s)"
                         % (type(obj).__name__, repr(obj)))
            raise ExecutionError("cannot represent object in xml")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = XMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # Consumer stopped reading: suppress the trailer and mark
                # the ETag invalid since the output is incomplete.
                etag.invalidate()
                trailer = None
                raise
            finally:
                # Emit the trailer (unless cancelled above) so the output
                # remains well-formed XML.
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as XML reply."""
        preamble = "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
        preamble += "<%s>" % self.label
        if cherrypy.request.rest_generate_preamble:
            # Optional description element (e.g. column labels) precedes
            # the actual data element.
            desc = self.format_obj(cherrypy.request.rest_generate_preamble)
            preamble += "<desc>%s</desc>" % desc
        preamble += "<%s>" % cherrypy.request.rest_generate_data
        trailer = "</%s></%s>" % (cherrypy.request.rest_generate_data, self.label)
        return preamble, trailer
class JSONFormat(RESTFormat):
    """Format an iterable of objects into JSON.
    Generates normally first a preamble, a stream of JSON-rendered objects,
    then the trailer, computing an ETag on the output string in the process.
    This is designed exclusively for use with iterables for chunked transfer
    encoding HTTP responses; it's not a general purpose formatting utility.
    Outputs first a preamble, then JSON encoded output of input stream, and
    finally a trailer. Any exceptions raised by input stream are reported to
    `report_rest_error` and swallowed, as this is normally used to generate
    output for CherryPy responses, which cannot handle exceptions reasonably
    after the output generation begins; later processing may reconvert those
    back to exceptions however (cf. stream_maybe_etag()). Once the preamble
    has been emitted, the trailer is also emitted even if the input stream
    raises an exception, in order to make the output well-formed; the client
    must inspect the X-REST-Status trailer header to find out if it got the
    complete output. No ETag header is generated in case of an exception.
    The ETag generation is deterministic only if `cjson.encode()` output is
    deterministic for the input. Beware in particular the key order for a
    dict is arbitrary and may differ for two semantically identical dicts.
    A X-REST-Status trailer header is added only in case of error. There is
    normally 'X-REST-Status: 100' in normal response headers, and it remains
    valid in case of success.
    The output is always generated as a JSON dictionary. The caller must
    define ``cherrypy.request.rest_generate_data`` as the key for actual
    contents, usually something like "result". The `stream` value will be
    generated as an array value for that key.
    If ``cherrypy.request.rest_generate_preamble`` is a non-empty list, it
    is output as the ``desc`` key value in the preamble before outputting
    the `stream` contents. Otherwise the output consists solely of `stream`.
    A common use of ``rest_generate_preamble`` is list of column labels
    with `stream` an iterable of lists of column values.
    The output is guaranteed to contain one line of preamble which starts a
    dictionary and an array ("``{key: [``"), one line of JSON rendering of
    each object in `stream`, with the first line starting with exactly one
    space and second and subsequent lines starting with a comma, and one
    final trailer line consisting of "``]}``". Each line is generated as a
    HTTP transfer chunk. This format is fixed so readers can be constructed
    to read and parse the stream incrementally one line at a time,
    facilitating maximum throughput processing of the response."""

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        # First object line is prefixed by a single space, subsequent ones
        # by a comma, so each emitted line continues the JSON array opened
        # by the preamble.
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            obj = None
            try:
                for obj in stream:
                    chunk = comma + json.dumps(obj) + "\n"
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except cherrypy.HTTPError:
                # Let HTTP errors propagate unchanged (re-raised again by
                # the outer handler below).
                raise
            except GeneratorExit:
                # Consumer stopped reading: suppress the trailer and mark
                # the ETag invalid since the output is incomplete.
                etag.invalidate()
                trailer = None
                raise
            except Exception as exp:
                # Report which object failed to serialise before letting
                # the outer handler convert this into an error report.
                print("ERROR, json.dumps failed to serialize %s, type %s\nException: %s" \
                      % (obj, type(obj), str(exp)))
                raise
            finally:
                # Emit the trailer (unless cancelled above) so the output
                # remains well-formed, and flag success in the headers.
                if trailer:
                    etag.update(trailer)
                    yield trailer
                    cherrypy.response.headers["X-REST-Status"] = 100
        except cherrypy.HTTPError:
            raise
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as JSON reply."""
        comma = ""
        preamble = "{"
        trailer = "]}\n"
        if cherrypy.request.rest_generate_preamble:
            # Optional 'desc' element (e.g. column labels) precedes the
            # data array in the preamble.
            desc = json.dumps(cherrypy.request.rest_generate_preamble)
            preamble += '"desc": %s' % desc
            comma = ", "
        preamble += '%s"%s": [\n' % (comma, cherrypy.request.rest_generate_data)
        return preamble, trailer
class PrettyJSONFormat(JSONFormat):
    """ Format used for human, (web browser)"""

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        # Like JSONFormat.stream_chunked but renders each object with
        # indent=2 for readability. Two visible differences from the
        # parent implementation: no newline is appended after each object
        # and there is no dedicated cherrypy.HTTPError clause around the
        # inner loop.
        comma = " "
        try:
            if preamble:
                etag.update(preamble)
                yield preamble
            try:
                for obj in stream:
                    chunk = comma + json.dumps(obj, indent=2)
                    etag.update(chunk)
                    yield chunk
                    comma = ","
            except GeneratorExit:
                # Consumer stopped reading: suppress the trailer and mark
                # the ETag invalid since the output is incomplete.
                etag.invalidate()
                trailer = None
                raise
            finally:
                # Emit the trailer (unless cancelled above) so the output
                # remains well-formed, and flag success in the headers.
                if trailer:
                    etag.update(trailer)
                    yield trailer
                    cherrypy.response.headers["X-REST-Status"] = 100
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
class PrettyJSONHTMLFormat(PrettyJSONFormat):
    """ Format used for human, (web browser) wrap around html tag on json"""

    @staticmethod
    def format_obj(obj):
        """Render an object `obj` into an HTML fragment.

        Strings are attribute-quoted; multi-line text is wrapped into a
        ``<pre>`` element. Dicts become ``<ul>`` lists with bold keys,
        other iterables become open ``<details>`` lists (empty iterables
        render as an empty string). Raises `ExecutionError` for types
        which cannot be represented."""
        if isinstance(obj, type(None)):
            result = ""
        elif isinstance(obj, str):
            obj = xml.sax.saxutils.quoteattr(obj)
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, bytes):
            # Bytes are assumed to be utf-8 text.
            obj = xml.sax.saxutils.quoteattr(str(obj, "utf-8"))
            result = "<pre>%s</pre>" % obj if '\n' in obj else obj
        elif isinstance(obj, (int, float, bool)):
            result = "%s" % obj
        elif isinstance(obj, dict):
            result = "<ul>"
            for k, v in viewitems(obj):
                result += "<li><b>%s</b>: %s</li>" % (k, PrettyJSONHTMLFormat.format_obj(v))
            result += "</ul>"
        elif is_iterable(obj):
            empty = True
            result = "<details open><ul>"
            for v in obj:
                empty = False
                result += "<li>%s</li>" % PrettyJSONHTMLFormat.format_obj(v)
            result += "</ul></details>"
            if empty:
                result = ""
        else:
            # Messages corrected: this class renders HTML, not XML, and
            # type(obj).__name__ reports the actual type (the former
            # type(obj).__class__.__name__ always printed 'type').
            cherrypy.log("cannot represent object of type %s in html (%s)"
                         % (type(obj).__name__, repr(obj)))
            raise ExecutionError("cannot represent object in html")
        return result

    def stream_chunked(self, stream, etag, preamble, trailer):
        """Generator for actually producing the output."""
        try:
            etag.update(preamble)
            yield preamble
            try:
                for obj in stream:
                    chunk = PrettyJSONHTMLFormat.format_obj(obj)
                    etag.update(chunk)
                    yield chunk
            except GeneratorExit:
                # Consumer stopped reading: suppress the trailer and mark
                # the ETag invalid since the output is incomplete.
                etag.invalidate()
                trailer = None
                raise
            finally:
                # Emit the trailer (unless cancelled above) so the output
                # remains a complete HTML document.
                if trailer:
                    etag.update(trailer)
                    yield trailer
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)

    def chunk_args(self, stream):
        """Return header and trailer needed to wrap `stream` as an HTML reply."""
        preamble = "<html><body>"
        trailer = "</body></html>"
        return preamble, trailer
class RawFormat(RESTFormat):
    """Pass an iterable of objects through as raw, unmodified data.

    Intended for image data, arbitrary external data files or plain
    text. The output is always chunked, even for plain string input;
    sufficiently small responses are collapsed back into a single string
    later on, after compression and ETag processing. An ETag is computed
    over the output as it streams.

    Exceptions raised by the input stream are reported via
    `report_rest_error` and swallowed, since CherryPy cannot handle
    exceptions once output generation has begun; later processing may
    convert them back into exceptions (cf. stream_maybe_etag()). On
    error an X-REST-Status trailer header is added and no ETag header is
    produced; the client must inspect that trailer to know whether it
    received complete output. Successful responses keep the normal
    'X-REST-Status: 100' header."""

    def stream_chunked(self, stream, etag):
        """Generator yielding `stream` verbatim while folding every chunk
        into `etag`."""
        try:
            for piece in stream:
                etag.update(piece)
                yield piece
        except RESTError as e:
            etag.invalidate()
            report_rest_error(e, format_exc(), False)
        except Exception as e:
            etag.invalidate()
            report_rest_error(ExecutionError(), format_exc(), False)
        except BaseException:
            # Non-Exception interrupts (e.g. GeneratorExit) still void
            # the ETag but are propagated to the caller.
            etag.invalidate()
            raise
class DigestETag(object):
    """Accumulate a hash digest over response contents for the ETag header."""

    # hashlib algorithm name; subclasses override this (e.g. 'md5').
    algorithm = None

    def __init__(self, algorithm=None):
        """Create the digest engine, preferring an explicit `algorithm`
        over the class-level default."""
        self.digest = hashlib.new(algorithm or self.algorithm)

    def update(self, val):
        """Fold response data `val` into the digest, unless the
        calculator has been invalidated."""
        if self.digest:
            self.digest.update(encodeUnicodeToBytes(val))

    def value(self):
        """Return the quoted ETag header value for the input consumed so
        far, or None once invalidated."""
        if not self.digest:
            return self.digest
        return '"%s"' % self.digest.hexdigest()

    def invalidate(self):
        """Invalidate the ETag calculator so value() will return None."""
        self.digest = None
class MD5ETag(DigestETag):
    """Compute MD5 hash over contents for ETag header."""
    # hashlib algorithm name consumed by DigestETag.__init__.
    algorithm = 'md5'
class SHA1ETag(DigestETag):
    """Compute SHA1 hash over contents for ETag header."""
    # hashlib algorithm name consumed by DigestETag.__init__.
    algorithm = 'sha1'
def _stream_compress_identity(reply, *args):
"""Streaming compressor which returns original data unchanged."""
return reply
def _stream_compress_deflate(reply, compress_level, max_chunk):
    """Streaming compressor for the 'deflate' method. Generates output that
    is guaranteed to expand at the exact same chunk boundaries as original
    reply stream."""
    # Create zlib compression object, with raw data stream (negative window size)
    z = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS,
                         zlib.DEF_MEM_LEVEL, 0)
    # Data pending compression. We only take entire chunks from original
    # reply. Then process reply one chunk at a time. Whenever we have enough
    # data to compress, spit it out flushing the zlib engine entirely, so we
    # respect original chunk boundaries.
    npending = 0  # number of bytes currently buffered in 'pending'
    pending = []  # buffered original chunks awaiting compression
    for chunk in reply:
        pending.append(chunk)
        npending += len(chunk)
        if npending >= max_chunk:
            # Z_FULL_FLUSH terminates this deflate block so the client can
            # expand it independently, preserving the chunk boundary.
            part = z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FULL_FLUSH)
            pending = []
            npending = 0
            yield part
    # Crank the compressor one more time for remaining output.
    # NOTE(review): if the input ends exactly on a max_chunk boundary
    # (npending == 0 here), Z_FINISH is never issued and the deflate
    # stream is not explicitly terminated -- confirm clients tolerate this.
    if npending:
        yield z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FINISH)
def _stream_compress_gzip(reply, compress_level, *args):
    """Streaming compressor for the 'gzip' method. Buffers the entire
    reply and emits it as a single gzip member compressed at
    `compress_level`; nothing is emitted when the reply yields no
    chunks at all. Extra arguments are accepted and ignored."""
    chunks = list(reply)
    if chunks:
        yield gzip.compress(encodeUnicodeToBytes("".join(chunks)), compress_level)
#: Stream compression methods, keyed by HTTP Content-Encoding token.
_stream_compressor = {
    'identity': _stream_compress_identity,
    'deflate': _stream_compress_deflate,
    'gzip': _stream_compress_gzip
}
def stream_compress(reply, available, compress_level, max_chunk):
    """If compression has been requested via Accept-Encoding request header,
    and is granted for this response via `available` compression methods,
    convert the streaming `reply` into another streaming response which is
    compressed at the exact chunk boundaries of the original response,
    except that individual chunks may be coalesced up to `max_chunk` size.
    The `compress_level` tells how hard to compress, zero disables the
    compression entirely.

    :arg reply: iterable of response chunks.
    :arg available: compression encodings granted for this response.
    :arg compress_level: compression effort; 0 disables compression.
    :arg max_chunk: maximum coalesced chunk size in bytes.
    :returns: a compressed streaming wrapper, or `reply` unchanged."""
    # Note: no 'global' statement needed; _stream_compressor is only read,
    # never rebound.
    for enc in cherrypy.request.headers.elements('Accept-Encoding'):
        if enc.value not in available:
            continue
        elif enc.value in _stream_compressor and compress_level > 0:
            # Add 'Vary' header for 'Accept-Encoding'.
            vary_by('Accept-Encoding')
            # Compress contents at original chunk boundaries. Compressed
            # length is unknown up front, so drop any precomputed value.
            if 'Content-Length' in cherrypy.response.headers:
                del cherrypy.response.headers['Content-Length']
            cherrypy.response.headers['Content-Encoding'] = enc.value
            return _stream_compressor[enc.value](reply, compress_level, max_chunk)
    return reply
def _etag_match(status, etagval, match, nomatch):
    """Evaluate `etagval` against the parsed If-Match / If-None-Match
    header values, raising the appropriate HTTP response when a
    condition fails."""
    # Conditions apply only to GET/HEAD and only for 2xx statuses; for
    # PUT etc. they must be evaluated as request pre-conditions before
    # execution, not while streaming the response out.
    if cherrypy.request.method not in ('GET', 'HEAD'):
        return
    code, dummyReason, dummyMsg = httputil.valid_status(status)
    if 200 <= code <= 299:
        if match and ("*" in match or etagval in match):
            raise cherrypy.HTTPError(412, "Precondition on ETag %s failed" % etagval)
        if nomatch and ("*" in nomatch or etagval in nomatch):
            raise cherrypy.HTTPRedirect([], 304)
def _etag_tail(head, tail, etag):
    """Generator yielding every chunk in `head`, then every chunk in
    `tail`, each encoded to bytes. After the streams are exhausted the
    ETag response header is set to the value of `etag`, provided it is
    defined and still yields a value."""
    for source in (head, tail):
        for chunk in source:
            yield encodeUnicodeToBytes(chunk)
    etagval = etag.value() if etag else etag
    if etagval:
        cherrypy.response.headers["ETag"] = etagval
def stream_maybe_etag(size_limit, etag, reply):
    """Maybe generate ETag header for the response, and handle If-Match
    and If-None-Match request headers. Consumes the reply until at most
    `size_limit` bytes. If the response fits into that size, adds the
    ETag header and matches it against any If-Match / If-None-Match
    request headers and replies appropriately.
    If the response is fully buffered, and the `reply` generator actually
    results in an error and sets X-Error-HTTP / X-Error-Detail headers,
    converts that error back into a real HTTP error response. Otherwise
    responds with the fully buffered body directly, without generator
    and chunking. In other words, responses smaller than `size_limit`
    are always fully buffered and replied immediately without chunking.
    If the response is not fully buffered, it's guaranteed to be output
    at original chunk boundaries.
    Note that if this function is fed the output from `stream_compress()`
    as it normally would be, the `size_limit` constrains the compressed
    size, and chunk boundaries correspond to compressed chunks.

    :arg size_limit: maximum number of bytes to buffer before falling
      back to streaming.
    :arg etag: ETag calculator being filled in by the reply generator.
    :arg reply: iterable of response chunks."""
    req = cherrypy.request
    res = cherrypy.response
    match = [str(x) for x in (req.headers.elements('If-Match') or [])]
    nomatch = [str(x) for x in (req.headers.elements('If-None-Match') or [])]
    # If ETag is already set, match conditions and output without buffering.
    etagval = res.headers.get('ETag', None)
    if etagval:
        _etag_match(res.status or 200, etagval, match, nomatch)
        res.headers['Trailer'] = 'X-REST-Status'
        return _etag_tail([], reply, None)
    # Buffer up to size_limit bytes internally. This interally builds up the
    # ETag value inside 'etag'. In case of exceptions the ETag invalidates.
    # If we exceed the limit, fall back to streaming without checking ETag
    # against If-Match/If-None-Match. We'll still set the ETag in the trailer
    # headers, so clients which understand trailers will get the value; most
    # clients including browsers will ignore them.
    size = 0
    result = []
    for chunk in reply:
        result.append(chunk)
        size += len(chunk)
        if size > size_limit:
            res.headers['Trailer'] = 'X-REST-Status'
            return _etag_tail(result, reply, etag)
    # We've buffered the entire response, but it may be an error reply. The
    # generator code does not know if it's allowed to raise exceptions, so
    # it swallows all errors and converts them into X-* headers. We recover
    # the original HTTP response code and message from X-Error-{HTTP,Detail}
    # headers, if any are present.
    err = res.headers.get('X-Error-HTTP', None)
    if err:
        message = res.headers.get('X-Error-Detail', 'Original error lost')
        raise cherrypy.HTTPError(int(err), message)
    # OK, we buffered the entire reply and it's ok. Check ETag match criteria.
    # The original stream generator must guarantee that if it fails it resets
    # the 'etag' value, even if the error handlers above didn't run.
    etagval = etag.value()
    if etagval:
        res.headers['ETag'] = etagval
        _etag_match(res.status or 200, etagval, match, nomatch)
    # OK, respond with the buffered reply as a plain string.
    res.headers['Content-Length'] = size
    # TODO investigate why `result` is a list of bytes strings in py3
    # The current solution seems to work in both py2 and py3
    resp = b"" if PY3 else ""
    for item in result:
        resp += encodeUnicodeToBytesConditional(item, condition=PY3)
    assert len(resp) == size
    return resp
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method = 'HNLogin',
                       login='testuser', name='Test User',
                       dn="/test/dn", roles=None, format="list"):
    """Create fake authentication and authorisation headers compatible
    with the CMSWEB front-ends. Assumes you have the HMAC signing key
    the back-end will use to validate the headers.
    :arg str hmac_key: binary key data for signing headers.
    :arg str method: authentication method, one of X509Cert, X509Proxy,
      HNLogin, HostIP, AUCookie or None.
    :arg str login: account login name.
    :arg str name: account user name.
    :arg str dn: account X509 subject.
    :arg dict roles: role dictionary, each role with 'site' and 'group' lists.
    :arg str format: 'list' to return a list of (name, value) tuples,
      anything else to return the headers dictionary itself.
    :returns: list of header name, value tuples to add to a HTTP request."""
    # None as the default avoids the shared-mutable-default pitfall.
    roles = roles if roles is not None else {}
    headers = { 'cms-auth-status': 'OK', 'cms-authn-method': method }
    if login:
        headers['cms-authn-login'] = login
    if name:
        headers['cms-authn-name'] = name
    if dn:
        headers['cms-authn-dn'] = dn
    # Dedicated loop variables avoid clobbering the 'name' parameter.
    for rolename, roledef in viewitems(roles):
        hname = 'cms-authz-' + authz_canonical(rolename)
        headers[hname] = []
        for r in 'site', 'group':
            if r in roledef:
                headers[hname].extend(["%s:%s" % (r, authz_canonical(v)) for v in roledef[r]])
        headers[hname] = " ".join(headers[hname])
    # Sign every header except the status with the front-end HMAC scheme:
    # first the name/value lengths, then the concatenated names and values.
    prefix = suffix = ""
    hkeys = list(headers)
    for hk in sorted(hkeys):
        if hk != 'cms-auth-status':
            prefix += "h%xv%x" % (len(hk), len(headers[hk]))
            suffix += "%s%s" % (hk, headers[hk])
    msg = prefix + "#" + suffix
    if PY3:
        hmac_key = encodeUnicodeToBytes(hmac_key)
        msg = encodeUnicodeToBytes(msg)
    cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
    headers['cms-authn-hmac'] = cksum
    if format == "list":
        return listitems(headers)
    else:
        return headers
def fake_authz_key_file(delete=True):
    """Create temporary file for fake authorisation hmac signing key.
    :arg bool delete: remove the temporary file automatically on close.
    :returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
      attribute contains the HMAC signing binary key."""
    t = NamedTemporaryFile(delete=delete)
    # os.urandom is the portable stdlib source of random bytes; no need
    # to open /dev/urandom by hand (which also fails on non-POSIX hosts).
    t.data = os.urandom(20)
    t.write(t.data)
    t.seek(0)
    return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
    """Helper function to set up a :class:`~.RESTMain` server from given
    module and class. Creates a fake server configuration and instantiates
    the server application from it.
    :arg str module_name: module from which to import test class.
    :arg str class_name: name of the server test class.
    :arg str app_name: optional test application name, 'test' by default.
    :arg authz_key_file: optional pre-built authz hmac key file (as made
      by :func:`fake_authz_key_file`); a fresh one is created when None.
    :arg int port: TCP port the cherrypy server will listen on.
    :returns: tuple with the server object and authz hmac signing key."""
    if authz_key_file:
        test_authz_key = authz_key_file
    else:
        test_authz_key = fake_authz_key_file()
    # Build a minimal WMCore Configuration mirroring a production server.
    cfg = Configuration()
    main = cfg.section_('main')
    main.application = app_name or 'test'
    main.silent = True
    main.index = 'top'
    main.authz_defaults = { 'role': None, 'group': None, 'site': None }
    main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
    app = cfg.section_(app_name or 'test')
    app.admin = 'dada@example.org'
    app.description = app.title = 'Test'
    views = cfg.section_('views')
    top = views.section_('top')
    # Dotted path resolved by RESTMain when installing the application.
    top.object = module_name + "." + class_name
    server = RESTMain(cfg, os.getcwd())
    server.validate_config()
    server.setup_server()
    server.install_application()
    # Configure cherrypy for local, traceback-friendly test operation.
    cherrypy.config.update({'server.socket_port': port})
    cherrypy.config.update({'server.socket_host': '127.0.0.1'})
    cherrypy.config.update({'request.show_tracebacks': True})
    cherrypy.config.update({'environment': 'test_suite'})
    for app in viewvalues(cherrypy.tree.apps):
        if '/' in app.config:
            app.config["/"]["request.show_tracebacks"] = True
    return server, test_authz_key
from __future__ import division, print_function, absolute_import
from future import standard_library
standard_library.install_aliases()
# system modules
import json
import logging
import math
import re
import time
from urllib.parse import quote, unquote
# WMCore modules
from Utils.IteratorTools import grouper
from Utils.CertTools import ckey, cert
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
# DBS agregators
from dbs.apis.dbsClient import aggRuns, aggFileLumis
# static variables
# Patterns presumably used to match 'StepN' / 'TaskN' keys in workflow
# request dictionaries -- confirm against callers outside this chunk.
STEP_PAT = re.compile(r'Step[0-9]')
TASK_PAT = re.compile(r'Task[0-9]')
def hasHTTPFailed(row):
    """
    Evaluates whether the HTTP request through PyCurl failed or not.
    :param row: dictionary data returned from pycurl_manager module
    :return: a boolean confirming failure or not
    """
    # Success requires a payload and a (possibly implicit) 200 status code.
    return 'data' not in row or int(row.get('code', 200)) != 200
def getMSLogger(verbose, logger=None):
    """
    _getMSLogger_
    Return a logger object using the standard WMCore formatter.

    :param verbose: boolean, True enables DEBUG level, otherwise INFO
    :param logger: optional pre-existing logger object; returned untouched if given
    :return: a logger object
    """
    # guard clause: reuse the caller-provided logger as-is
    if logger:
        return logger
    logLevel = logging.DEBUG if verbose else logging.INFO
    rootLogger = logging.getLogger()
    logging.basicConfig(format="%(asctime)s:%(levelname)s:%(module)s: %(message)s",
                        level=logLevel)
    return rootLogger
def isRelVal(reqDict):
    """
    Helper function to evaluate whether the workflow is RelVal or not.

    :param reqDict: dictionary with the workflow description
    :return: True if it's a RelVal workflow, otherwise False
    """
    relvalTypes = ('RelVal', 'HIRelVal')
    return reqDict.get("SubRequestType", "") in relvalTypes
def dbsInfo(datasets, dbsUrl):
    """
    Provides DBS info about dataset blocks.

    :param datasets: list of dataset names
    :param dbsUrl: string with the DBS URL
    :return: a tuple of three dictionaries, all keyed by dataset name:
        list of block names, total dataset size, and a flat map of
        blockName: blockSize
    """
    datasetBlocks = {}
    datasetSizes = {}
    datasetTransfers = {}
    if not datasets:
        return datasetBlocks, datasetSizes, datasetTransfers
    urls = ['%s/blocks?detail=True&dataset=%s' % (dbsUrl, d) for d in datasets]
    logging.info("Executing %d requests against DBS 'blocks' API, with details", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # 'dataset' is the last query parameter in the url built above
        dataset = row['url'].split('=')[-1]
        if hasHTTPFailed(row):
            print("FAILURE: dbsInfo for %s. Error: %s %s" % (dataset, row.get('code'), row.get('error')))
            continue
        rows = json.loads(row['data'])
        blocks = []
        size = 0
        datasetTransfers.setdefault(dataset, {}) # flat dict in the format of blockName: blockSize
        for item in rows:
            blocks.append(item['block_name'])
            size += item['block_size']
            datasetTransfers[dataset].update({item['block_name']: item['block_size']})
        datasetBlocks[dataset] = blocks
        datasetSizes[dataset] = size
    return datasetBlocks, datasetSizes, datasetTransfers
def getPileupDocs(mspileupUrl, queryDict):
    """
    Fetch documents from MSPileup according to the query passed in.

    :param mspileupUrl: string with the MSPileup url
    :param queryDict: dictionary with the MongoDB query to run
    :return: returns a list with all the pileup objects (empty list if the
        service returned no data), or raises a RuntimeError in case the
        service reports an error
    """
    mgr = RequestHandler()
    headers = {'Content-Type': 'application/json'}
    data = mgr.getdata(mspileupUrl, queryDict, headers, verb='POST',
                       ckey=ckey(), cert=cert(), encode=True, decode=True)
    if data and data.get("result", []):
        if "error" in data["result"][0]:
            msg = f"Failed to retrieve MSPileup documents with query: {queryDict}"
            msg += f" and error message: {data}"
            raise RuntimeError(msg)
    if not data:
        # an empty/None payload previously fell through to data["result"],
        # raising a confusing KeyError/TypeError instead of returning no docs
        return []
    return data.get("result", [])
def getPileupDatasetSizes(datasets, phedexUrl):
    """
    Given a list of datasets, find all their blocks with replicas
    available, i.e., blocks that have valid files to be processed,
    and calculate the total dataset size
    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :return: a dictionary of datasets and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    sizeByDset = {}
    if not datasets:
        return sizeByDset
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # 'dataset' is the last query parameter in the url built above
        dataset = row['url'].split('=')[-1]
        if row['data'] is None:
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
                                                                                     row.get('code'),
                                                                                     row.get('error')))
            sizeByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        sizeByDset.setdefault(dataset, 0)
        try:
            # sum the size over every block of this dataset
            for item in rows['phedex']['block']:
                sizeByDset[dataset] += item['bytes']
        except Exception as exc:
            print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
            sizeByDset[dataset] = None
    return sizeByDset
def getBlockReplicasAndSize(datasets, phedexUrl, group=None):
    """
    Given a list of datasets, find all their blocks with replicas
    available (thus blocks with at least 1 valid file), completed
    and subscribed.
    If PhEDEx group is provided, make sure it's subscribed under that
    same group.
    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :param group: optional PhEDEx group name
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    dsetBlockSize = {}
    if not datasets:
        return dsetBlockSize
    urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # 'dataset' is the last query parameter in the url built above
        dataset = row['url'].split('=')[-1]
        if row['data'] is None:
            print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s %s" % (dataset,
                                                                                       row.get('code'),
                                                                                       row.get('error')))
            dsetBlockSize.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        dsetBlockSize.setdefault(dataset, {})
        try:
            for item in rows['phedex']['block']:
                block = {item['name']: {'blockSize': item['bytes'], 'locations': []}}
                # only keep replicas that are complete and subscribed
                # (and, if requested, subscribed under the given group)
                for repli in item['replica']:
                    if repli['complete'] == 'y' and repli['subscribed'] == 'y':
                        if not group:
                            block[item['name']]['locations'].append(repli['node'])
                        elif repli['group'] == group:
                            block[item['name']]['locations'].append(repli['node'])
                dsetBlockSize[dataset].update(block)
        except Exception as exc:
            print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s" % (dataset, str(exc)))
            dsetBlockSize[dataset] = None
    return dsetBlockSize
def getPileupSubscriptions(datasets, phedexUrl, group=None, percentMin=99):
    """
    Provided a list of datasets, find dataset level subscriptions where it's
    as complete as `percentMin`.
    :param datasets: list of dataset names
    :param phedexUrl: a string with the PhEDEx URL
    :param group: optional string with the PhEDEx group
    :param percentMin: only return subscriptions that are this complete
    :return: a dictionary of datasets and a list of their location.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    locationByDset = {}
    if not datasets:
        return locationByDset
    if group:
        url = "%s/subscriptions?group=%s" % (phedexUrl, group)
        url += "&percent_min=%s&dataset=%s"
    else:
        url = "%s/subscriptions?" % phedexUrl
        url += "percent_min=%s&dataset=%s"
    urls = [url % (percentMin, dset) for dset in datasets]
    logging.info("Executing %d requests against PhEDEx 'subscriptions' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # 'dataset' is the last query parameter in the url built above
        dataset = row['url'].rsplit('=')[-1]
        if row['data'] is None:
            print("Failure in getPileupSubscriptions for dataset %s. Error: %s %s" % (dataset,
                                                                                      row.get('code'),
                                                                                      row.get('error')))
            locationByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        locationByDset.setdefault(dataset, [])
        try:
            for item in rows['phedex']['dataset']:
                for subs in item['subscription']:
                    locationByDset[dataset].append(subs['node'])
        except Exception as exc:
            print("Failure in getPileupSubscriptions for dataset %s. Error: %s" % (dataset, str(exc)))
            locationByDset[dataset] = None
    return locationByDset
def getBlocksByDsetAndRun(datasetName, runList, dbsUrl):
    """
    Given a dataset name and a list of runs, find all the blocks.

    :param datasetName: string with the dataset name
    :param runList: list (or set) of run numbers
    :param dbsUrl: string with the DBS URL
    :return: flat list of blocks
    :raises RuntimeError: if any of the DBS requests fails
    """
    blocks = set()
    if isinstance(runList, set):
        runList = list(runList)
    urls = []
    # query DBS in slices of 50 runs to keep the url length under control
    for runSlice in grouper(runList, 50):
        urls.append('%s/blocks?run_num=%s&dataset=%s' % (dbsUrl, str(runSlice).replace(" ", ""), datasetName))
    logging.info("Executing %d requests against DBS 'blocks' API, with run_num list", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        dataset = row['url'].rsplit('=')[-1]
        if hasHTTPFailed(row):
            msg = "Failure in getBlocksByDsetAndRun for %s. Error: %s %s" % (dataset,
                                                                             row.get('code'),
                                                                             row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        for item in rows:
            blocks.add(item['block_name'])
    return list(blocks)
def getFileLumisInBlock(blocks, dbsUrl, validFileOnly=1):
    """
    Given a list of blocks, find their file run lumi information
    in DBS for up to 10 blocks concurrently
    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    :param validFileOnly: integer flag for valid files only or not
    :return: a dict of blocks with list of file/run/lumi info
    :raises RuntimeError: if any of the DBS requests fails
    """
    runLumisByBlock = {}
    urls = ['%s/filelumis?validFileOnly=%d&block_name=%s' % (dbsUrl, validFileOnly, quote(b)) for b in blocks]
    # limit it to 10 concurrent calls not to overload DBS
    logging.info("Executing %d requests against DBS 'filelumis' API, concurrency limited to 10", len(urls))
    data = multi_getdata(urls, ckey(), cert(), num_conn=10)
    for row in data:
        # block names are url-quoted in the request, so unquote them back
        blockName = unquote(row['url'].rsplit('=')[-1])
        if hasHTTPFailed(row):
            msg = "Failure in getFileLumisInBlock for block %s. Error: %s %s" % (blockName,
                                                                                 row.get('code'),
                                                                                 row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        rows = aggFileLumis(rows) # adjust to DBS Go server output
        runLumisByBlock.setdefault(blockName, [])
        for item in rows:
            runLumisByBlock[blockName].append(item)
    return runLumisByBlock
def findBlockParents(blocks, dbsUrl):
    """
    Helper function to find block parents given a list of block names.

    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    Return a dictionary in the format of:
    {"child dataset name": {"child block": ["parent blocks"],
                            "child block": ["parent blocks"], ...}}
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    parentsByBlock = {}
    urls = ['%s/blockparents?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
    logging.info("Executing %d requests against DBS 'blockparents' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # block names are url-quoted in the request, so unquote them back
        blockName = unquote(row['url'].rsplit('=')[-1])
        # the dataset name is the part of the block name before the '#'
        dataset = blockName.split("#")[0]
        if hasHTTPFailed(row):
            print("Failure in findBlockParents for block %s. Error: %s %s" % (blockName,
                                                                              row.get('code'),
                                                                              row.get('error')))
            parentsByBlock.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        try:
            if dataset in parentsByBlock and parentsByBlock[dataset] is None:
                # then one of the block calls has failed, keep it failed!
                continue
            parentsByBlock.setdefault(dataset, {})
            for item in rows:
                parentsByBlock[dataset].setdefault(item['this_block_name'], set())
                parentsByBlock[dataset][item['this_block_name']].add(item['parent_block_name'])
        except Exception as exc:
            print("Failure in findBlockParents for block %s. Error: %s" % (blockName, str(exc)))
            parentsByBlock[dataset] = None
    return parentsByBlock
def getRunsInBlock(blocks, dbsUrl):
    """
    Provided a list of block names, find their run numbers
    :param blocks: list of block names
    :param dbsUrl: string with the DBS URL
    :return: a dictionary of block names and a list of run numbers
    :raises RuntimeError: if any of the DBS requests fails
    """
    runsByBlock = {}
    urls = ['%s/runs?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
    logging.info("Executing %d requests against DBS 'runs' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # block names are url-quoted in the request, so unquote them back
        blockName = unquote(row['url'].rsplit('=')[-1])
        if hasHTTPFailed(row):
            msg = "Failure in getRunsInBlock for block %s. Error: %s %s" % (blockName,
                                                                            row.get('code'),
                                                                            row.get('error'))
            raise RuntimeError(msg)
        rows = json.loads(row['data'])
        rows = aggRuns(rows) # adjust to DBS Go server output
        # NOTE(review): assumes aggRuns collapses everything into a single
        # record holding 'run_num' -- confirm against the DBS client docs
        runsByBlock[blockName] = rows[0]['run_num']
    return runsByBlock
def getWorkflow(requestName, reqMgrUrl):
    """
    Get list of workflow info from ReqMgr2 data-service for given request name.

    :param requestName: string with the workflow/request name
    :param reqMgrUrl: string with the ReqMgr2 URL
    :return: the 'result' list from the ReqMgr2 response (empty list if absent)
    """
    headers = {'Accept': 'application/json'}
    params = {}
    url = '%s/data/request/%s' % (reqMgrUrl, requestName)
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    data = json.loads(res)
    return data.get('result', [])
def getDetoxQuota(url):
    """
    Fetch the content of the given (Detox quota) URL and return it
    split into a list of lines.

    :param url: string with the url to fetch
    :return: list of strings, one per line of the response
    """
    headers = {}
    params = {}
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    res = res.split('\n')
    return res
def eventsLumisInfo(inputs, dbsUrl, validFileOnly=0, sumOverLumi=0):
    """
    Get information about events and lumis for given set of inputs: blocks or datasets.

    :param inputs: list of dataset names, or list of block names (blocks contain a '#')
    :param dbsUrl: string with the DBS URL
    :param validFileOnly: integer flag to only account for valid files
    :param sumOverLumi: integer flag to sum event counts over lumi sections
    :return: dictionary keyed by the input name, with its DBS file summary record
    """
    what = 'dataset'
    eventsLumis = {}
    if not inputs:
        return eventsLumis
    if '#' in inputs[0]:  # inputs are list of blocks
        what = 'block_name'
    urls = ['%s/filesummaries?validFileOnly=%s&sumOverLumi=%s&%s=%s'
            % (dbsUrl, validFileOnly, sumOverLumi, what, quote(i)) for i in inputs]
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # NOTE: previously this reassigned `data`, the very iterable being
        # looped over; use a dedicated local name instead
        inputName = unquote(row['url'].split('=')[-1])
        if hasHTTPFailed(row):
            print("FAILURE: eventsLumisInfo for %s. Error: %s %s" % (inputName,
                                                                     row.get('code'),
                                                                     row.get('error')))
            continue
        rows = json.loads(row['data'])
        for item in rows:
            eventsLumis[inputName] = item
    return eventsLumis
def getEventsLumis(dataset, dbsUrl, blocks=None, eventsLumis=None):
    """
    Helper function to return number of events/lumis for given dataset or blocks.

    :param dataset: string with the dataset name
    :param dbsUrl: string with the DBS URL
    :param blocks: optional list of block names to restrict the counting to
    :param eventsLumis: optional cache dict of already-known events/lumis
        records; it is updated in place with anything fetched from DBS
    :return: tuple of (number of events, number of lumis)
    """
    nevts = nlumis = 0
    if blocks:
        # guard: a None cache would make the membership test below blow up
        eventsLumis = {} if eventsLumis is None else eventsLumis
        missingBlocks = [b for b in blocks if b not in eventsLumis]
        if missingBlocks:
            eLumis = eventsLumisInfo(missingBlocks, dbsUrl)
            eventsLumis.update(eLumis)
        for block in blocks:
            data = eventsLumis[block]
            nevts += data['num_event']
            nlumis += data['num_lumi']
        return nevts, nlumis
    if eventsLumis and dataset in eventsLumis:
        data = eventsLumis[dataset]
        return data['num_event'], data['num_lumi']
    eLumis = eventsLumisInfo([dataset], dbsUrl)
    data = eLumis.get(dataset, {'num_event': 0, 'num_lumi': 0})
    return data['num_event'], data['num_lumi']
def getComputingTime(workflow, eventsLumis=None, unit='h', dbsUrl=None, logger=None):
    """
    Return the estimated computing time for the given workflow, i.e. the
    number of input events multiplied by the TimePerEvent parameter(s).

    :param workflow: dictionary with the workflow description
    :param eventsLumis: optional cache of events/lumis info, passed to getEventsLumis
    :param unit: time unit of the result: 's' (default seconds), 'm', 'h' or 'd'
    :param dbsUrl: string with the DBS URL, used for events lookup
    :param logger: optional logger object
    :return: the computing time in the requested unit (0 if it cannot be computed)
    """
    logger = getMSLogger(verbose=True, logger=logger)
    cput = None
    if 'InputDataset' in workflow:
        # processing workflow: events come from the input dataset (or block whitelist)
        dataset = workflow['InputDataset']
        if 'BlockWhitelist' in workflow and workflow['BlockWhitelist']:
            nevts, _ = getEventsLumis(dataset, dbsUrl, workflow['BlockWhitelist'], eventsLumis)
        else:
            nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
        tpe = workflow['TimePerEvent']
        cput = nevts * tpe
    elif 'Chain' in workflow['RequestType']:
        # StepChain/TaskChain: walk Task1/Step1, Task2/Step2, ... accumulating
        # the cpu time; carryOn propagates the event count from a task to the
        # tasks consuming its output (scaled by FilterEfficiency if present)
        base = workflow['RequestType'].replace('Chain', '')
        itask = 1
        cput = 0
        carryOn = {}
        while True:
            t = '%s%d' % (base, itask)
            itask += 1
            if t in workflow:
                task = workflow[t]
                if 'InputDataset' in task:
                    dataset = task['InputDataset']
                    if 'BlockWhitelist' in task and task['BlockWhitelist']:
                        nevts, _ = getEventsLumis(dataset, dbsUrl, task['BlockWhitelist'], eventsLumis)
                    else:
                        nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
                elif 'Input%s' % base in task:
                    # task consumes the output of a previous task in the chain
                    nevts = carryOn[task['Input%s' % base]]
                elif 'RequestNumEvents' in task:
                    nevts = float(task['RequestNumEvents'])
                else:
                    logger.debug("this is not supported, making it zero cput")
                    nevts = 0
                tpe = task.get('TimePerEvent', 1)
                carryOn[task['%sName' % base]] = nevts
                if 'FilterEfficiency' in task:
                    carryOn[task['%sName' % base]] *= task['FilterEfficiency']
                cput += tpe * nevts
            else:
                break
    else:
        # generation workflow: scale the requested events up by the filter
        # efficiency to estimate how many raw events must be produced
        nevts = float(workflow.get('RequestNumEvents', 0))
        feff = float(workflow.get('FilterEfficiency', 1))
        tpe = workflow.get('TimePerEvent', 1)
        cput = nevts / feff * tpe
    if cput is None:
        return 0
    # convert from seconds to the requested unit
    if unit == 'm':
        cput = cput / (60.)
    if unit == 'h':
        cput = cput / (60. * 60.)
    if unit == 'd':
        cput = cput / (60. * 60. * 24.)
    return cput
def sigmoid(x):
    """Logistic sigmoid function: 1 / (1 + exp(-x))."""
    exponential = math.exp(-x)
    return 1. / (1 + exponential)
def getNCopies(cpuHours, minN=2, maxN=3, weight=50000, constant=100000):
    """
    Calculate the number of copies for a given workflow, interpolating
    between minN and maxN via a sigmoid of the workflow's cpu hours.
    """
    baseline = sigmoid(-constant / weight)
    slope = (maxN - minN) / (1 - baseline)
    offset = (baseline * maxN - minN) / (baseline - 1)
    scaled = sigmoid((cpuHours - constant) / weight)
    return int(offset + slope * scaled)
def teraBytes(size):
    "Return size in TB (Terabytes)"
    oneTera = 1000 ** 4
    return size / oneTera
def gigaBytes(size):
    "Return size in GB (Gigabytes), rounded to 2 digits"
    oneGiga = 1000 ** 3
    return round(size / oneGiga, 2)
def elapsedTime(time0, msg='Elapsed time', ndigits=1):
    "Helper function to return elapsed time message"
    delta = round(time.time() - time0, ndigits)
    return "%s: %s sec" % (msg, delta)
def getRequest(url, params):
    "Helper function to GET data from given URL"
    headers = {'Accept': 'application/json'}
    # 'verbose' is a control flag, not a query field: strip it out of the
    # caller's params dict (same in-place mutation as before)
    verbose = params.pop('verbose', 0)
    mgr = RequestHandler()
    return mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(), verbose=verbose)
def postRequest(url, params):
    "Helper function to POST request to given URL"
    headers = {'Accept': 'application/json'}
    # 'verbose' is a control flag, not a payload field: strip it out of the
    # caller's params dict (same in-place mutation as before)
    verbose = params.pop('verbose', 0)
    mgr = RequestHandler()
    return mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(),
                       verb='POST', verbose=verbose)
def getIO(request, dbsUrl):
    "Get input/output info about given request"
    # plain (non-chained) request: a single task holds all the IO info
    if 'Chain' not in request['RequestType']:
        return ioForTask(request, dbsUrl)
    # chained request: aggregate IO info over every TaskN/StepN member
    lhe = False
    primary = set()
    parent = set()
    secondary = set()
    base = request['RequestType'].replace('Chain', '')
    item = 1
    while '%s%d' % (base, item) in request:
        taskLhe, taskPrimary, taskParent, taskSecondary = \
            ioForTask(request['%s%d' % (base, item)], dbsUrl)
        lhe = lhe or taskLhe
        primary.update(taskPrimary)
        parent.update(taskParent)
        secondary.update(taskSecondary)
        item += 1
    return lhe, primary, parent, secondary
def ioForTask(request, dbsUrl):
    "Return lhe, primary, parent and secondary datasets for given request"
    primary, parent, secondary = set(), set(), set()
    if 'InputDataset' in request:
        dsets = request['InputDataset']
        if not isinstance(dsets, list):
            dsets = [dsets]
        # drop empty/None entries
        primary = {d for d in dsets if d}
    if primary and request.get('IncludeParent'):
        parent = findParent(primary, dbsUrl)
    if 'MCPileup' in request:
        pileups = request['MCPileup']
        if not isinstance(pileups, list):
            pileups = [pileups]
        secondary = {p for p in pileups if p}
    # LheInputFiles may come as a boolean or as the string 'True'
    lhe = request.get('LheInputFiles') in ['True', True]
    return lhe, primary, parent, secondary
def findParent(datasets, dbsUrl):
    """
    Helper function to find the parent dataset.
    It returns a dictionary key'ed by the child dataset
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    parentByDset = {}
    if not datasets:
        return parentByDset
    urls = ['%s/datasetparents?dataset=%s' % (dbsUrl, d) for d in datasets]
    logging.info("Executing %d requests against DBS 'datasetparents' API", len(urls))
    data = multi_getdata(urls, ckey(), cert())
    for row in data:
        # 'dataset' is the last query parameter in the url built above
        dataset = row['url'].split('=')[-1]
        if hasHTTPFailed(row):
            print("Failure in findParent for dataset %s. Error: %s %s" % (dataset,
                                                                          row.get('code'),
                                                                          row.get('error')))
            parentByDset.setdefault(dataset, None)
            continue
        rows = json.loads(row['data'])
        try:
            for item in rows:
                parentByDset[item['this_dataset']] = item['parent_dataset']
        except Exception as exc:
            print("Failure in findParent for dataset %s. Error: %s" % (dataset, str(exc)))
            parentByDset[dataset] = None
    return parentByDset
from __future__ import print_function, division, absolute_import
from builtins import str
from future.utils import viewitems
from future import standard_library
standard_library.install_aliases()
import calendar
import datetime
import json
import logging
import re
from urllib.parse import quote, unquote
from Utils.CertTools import cert, ckey
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
### Amount of days that we wait for stuck rules to be sorted out.
### After that, the rule is not considered and a new rule is created.
STUCK_LIMIT = 7 # 7 days
def parseNewLineJson(stream):
    """
    Parse newline delimited json streaming data.

    :param stream: string with newline-separated JSON records
    :return: generator yielding one decoded object per non-empty line
    """
    # skip blank lines, e.g. the trailing newline at the end of the stream
    for line in stream.split("\n"):
        if not line:
            continue
        yield json.loads(line)
def stringDateToEpoch(strDate):
    """
    Given a date/time in the format of:
        'Thu, 29 Apr 2021 13:15:42 UTC'
    it returns an integer with the equivalent EPOCH time
    :param strDate: a string with the date and time (in UTC)
    :return: the equivalent EPOCH time (integer)
    """
    timestamp = datetime.datetime.strptime(strDate, "%a, %d %b %Y %H:%M:%S %Z")
    # NOTE: strftime('%s') (previous implementation) is not portable and
    # silently interprets the naive datetime in the *local* timezone, even
    # though the input string is UTC; calendar.timegm treats it as UTC
    return calendar.timegm(timestamp.timetuple())
def getRucioToken(rucioAuthUrl, rucioAcct):
    """
    Provided a Rucio account, fetch a token from the authentication server
    :param rucioAuthUrl: url to the rucio authentication server
    :param rucioAcct: rucio account to be used
    :return: a tuple with the token string and its expiration time in EPOCH
    :raises RuntimeError: if the token could not be acquired
    """
    params = {}
    headers = {"X-Rucio-Account": rucioAcct}
    url = '%s/auth/x509' % rucioAuthUrl
    logging.info("Requesting a token to Rucio for account: %s, against url: %s", rucioAcct, rucioAuthUrl)
    mgr = RequestHandler()
    res = mgr.getheader(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    if res.getReason() == "OK":
        # token and its expiration are returned as response headers
        userToken = res.getHeaderKey('X-Rucio-Auth-Token')
        tokenExpiration = res.getHeaderKey('X-Rucio-Auth-Token-Expires')
        logging.info("Retrieved Rucio token valid until: %s", tokenExpiration)
        # convert the human readable expiration time to EPOCH time
        tokenExpiration = stringDateToEpoch(tokenExpiration)
        return userToken, tokenExpiration
    raise RuntimeError("Failed to acquire a Rucio token. Error: {}".format(res.getReason()))
def renewRucioToken(rucioAuthUrl, userToken):
    """
    Provided a user Rucio token, check its lifetime and extend it by another hour
    :param rucioAuthUrl: url to the rucio authentication server
    :param userToken: the user Rucio token to validate/renew
    :return: the 'lifetime' value parsed out of the server response
    :raises RuntimeError: if the response cannot be parsed
    """
    params = {}
    headers = {"X-Rucio-Auth-Token": userToken}
    url = '%s/auth/validate' % rucioAuthUrl
    logging.info("Renewing the Rucio token...")
    mgr = RequestHandler()
    res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
    try:
        # NOTE(review): eval() on a remote service response is unsafe
        # (arbitrary code execution if the endpoint is compromised);
        # consider ast.literal_eval or json.loads instead -- confirm the
        # exact payload format returned by /auth/validate first
        newExpiration = eval(res)['lifetime']
    except Exception as exc:
        raise RuntimeError("Failed to renew Rucio token. Response: {} Error: {}".format(res, str(exc)))
    return newExpiration
def getPileupContainerSizesRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of containers, find their total size in Rucio
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a flat dictionary of container and their respective sizes
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE: Rucio version of getPileupDatasetSizes()
    """
    sizeByDset = {}
    if not containers:
        return sizeByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/{}?dynamic=anything'.format(rucioUrl, scope, cont) for cont in containers]
    logging.info("Executing %d requests against Rucio for the container size", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # reconstruct the container name back from the request url
        container = row['url'].split('/dids/{}/'.format(scope))[1]
        container = container.replace("?dynamic=anything", "")
        if row['data'] is None:
            msg = "Failure in getPileupContainerSizesRucio for container {}. Response: {}".format(container, row)
            logging.error(msg)
            sizeByDset.setdefault(container, None)
            continue
        response = json.loads(row['data'])
        try:
            sizeByDset.setdefault(container, response['bytes'])
        except KeyError:
            msg = "getPileupContainerSizesRucio function did not return a valid response for container: %s. Error: %s"
            logging.error(msg, container, response)
            sizeByDset.setdefault(container, None)
            continue
    return sizeByDset
def listReplicationRules(containers, rucioAccount, grouping,
                         rucioUrl, rucioToken, scope="cms"):
    """
    List all the replication rules for the input filters provided.
    It builds a dictionary of container name and the locations where
    they have a rule locking data on, with some additional rule state
    logic in the code.
    :param containers: list of container names
    :param rucioAccount: string with the rucio account
    :param grouping: rule grouping string, only "A" or "D" are allowed
    :param rucioUrl: string with the Rucio url
    :param rucioToken: string with the Rucio token
    :param scope: string with the data scope
    :return: a flat dictionary key'ed by the container name, with a list of RSE
        expressions that still need to be resolved
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE-2: Available rule states can be found at:
    https://github.com/rucio/rucio/blob/16f39dffa1608caa0a1af8bbc0fcff2965dccc50/lib/rucio/db/sqla/constants.py#L180
    """
    locationByContainer = {}
    if not containers:
        return locationByContainer
    if grouping not in ["A", "D"]:
        raise RuntimeError("Replication rule grouping value provided ({}) is not allowed!".format(grouping))
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = []
    for cont in containers:
        urls.append('{}/rules/?scope={}&account={}&grouping={}&name={}'.format(rucioUrl, scope, rucioAccount,
                                                                               grouping, quote(cont, safe="")))
    logging.info("Executing %d requests against Rucio to list replication rules", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        container = unquote(row['url'].split("name=")[1])
        if "200 OK" not in row['headers']:
            msg = "Failure in listReplicationRules for container {}. Response: {}".format(container, row)
            logging.error(msg)
            locationByContainer.setdefault(container, None)
            continue
        try:
            locationByContainer.setdefault(container, [])
            for item in parseNewLineJson(row['data']):
                # suspended/replicating/injecting rules are not usable yet
                if item['state'] in ["U", "SUSPENDED", "R", "REPLICATING", "I", "INJECT"]:
                    msg = "Container %s has a rule ID %s in state %s. Will try to create a new rule."
                    logging.warning(msg, container, item['id'], item['state'])
                    continue
                elif item['state'] in ["S", "STUCK"]:
                    if item['error'] == 'NO_SOURCES:NO_SOURCES':
                        msg = "Container {} has a STUCK rule with NO_SOURCES.".format(container)
                        msg += " Data could be lost forever... Rule info is: {}".format(item)
                        logging.warning(msg)
                        continue
                    # then calculate for how long it's been stuck
                    # NOTE(review): strftime('%s') is non-portable and applies
                    # the local timezone to a naive UTC datetime; the same skew
                    # exists in stringDateToEpoch, so the difference below
                    # stays consistent -- consider calendar.timegm for both
                    utcTimeNow = int(datetime.datetime.utcnow().strftime('%s'))
                    if item['stuck_at']:
                        stuckAt = stringDateToEpoch(item['stuck_at'])
                    else:
                        # consider it to be stuck since its creation
                        stuckAt = stringDateToEpoch(item['created_at'])
                    daysStuck = (utcTimeNow - stuckAt) // (24 * 60 * 60)
                    if daysStuck > STUCK_LIMIT:
                        msg = "Container {} has a STUCK rule for {} days (limit set to: {}).".format(container,
                                                                                                     daysStuck,
                                                                                                     STUCK_LIMIT)
                        msg += " Not going to use it! Rule info: {}".format(item)
                        logging.warning(msg)
                        continue
                    else:
                        msg = "Container {} has a STUCK rule for only {} days.".format(container, daysStuck)
                        msg += " Considering it for the pileup location"
                        logging.info(msg)
                else:
                    logging.info("Container %s has rule ID %s in state %s, using it.",
                                 container, item['id'], item['state'])
                ### NOTE: this is not an RSE name, but an RSE expression that still needs to be resolved
                locationByContainer[container].append(item['rse_expression'])
        except Exception as exc:
            msg = "listReplicationRules function did not return a valid response for container: %s."
            msg += "Server responded with: %s\nError: %s"
            logging.exception(msg, container, str(exc), row['data'])
            # NOTE(review): setdefault will not overwrite the [] already set
            # above, so a mid-parsing failure is not actually marked as None
            # -- confirm whether that is the intended behavior
            locationByContainer.setdefault(container, None)
            continue
    return locationByContainer
def getPileupSubscriptionsRucio(datasets, rucioUrl, rucioToken, scope="cms"):
    """
    Provided a list of datasets, find the RSEs where every block of the
    container has an AVAILABLE replica (intersection over blocks).
    :param datasets: list of dataset names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary of datasets and a set of their locations.
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    # FIXME: we should definitely make a feature request to Rucio...
    # so much, just to get the final RSEs for a container!!!
    locationByDset = {}
    if not datasets:
        return locationByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # first, resolve the dataset into blocks
    blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope)
    urls = []
    for _dset, blocks in viewitems(blocksByDset):
        if blocks:
            for block in blocks:
                urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
    # this is going to be bloody expensive in terms of HTTP requests
    logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # reconstruct block and container names back from the request url
        block = row['url'].split("/{}/".format(scope))[1]
        block = unquote(re.sub("/datasets$", "", block, 1))
        container = block.split("#")[0]
        locationByDset.setdefault(container, set())
        if row['data'] is None:
            msg = "Failure in getPileupSubscriptionsRucio container {} and block {}.".format(container, block)
            msg += " Response: {}".format(row)
            logging.error(msg)
            locationByDset[container] = None
            continue
        if locationByDset[container] is None:
            # then one of the block requests failed, skip the whole dataset
            continue
        thisBlockRSEs = set()
        for item in parseNewLineJson(row['data']):
            if item['state'] == "AVAILABLE":
                thisBlockRSEs.add(item["rse"])
        logging.info("Block: %s is available at: %s", block, thisBlockRSEs)
        # now we have the final block location
        if not locationByDset[container]:
            # then this is the first block of this dataset
            # NOTE(review): an empty-RSE block resets the accumulator, so the
            # next block restarts the intersection -- confirm this is intended
            locationByDset[container] = thisBlockRSEs
        else:
            # otherwise, make an intersection of them
            locationByDset[container] = locationByDset[container] & thisBlockRSEs
    return locationByDset
def getBlocksAndSizeRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of containers, find all their correspondent blocks and their sizes.
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    NOTE2: meant to return an output similar to Common.getBlockReplicasAndSize
    """
    contBlockSize = {}
    if not containers:
        return contBlockSize
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = []
    for cont in containers:
        # 'cont#*' matches every block of the container
        urls.append('{}/dids/{}/dids/search?type=dataset&long=True&name={}'.format(rucioUrl, scope, quote(cont + "#*")))
    logging.info("Executing %d requests against Rucio DIDs search API for containers", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # reconstruct the container name back from the request url
        container = row['url'].split("name=")[1]
        container = unquote(container).replace("#*", "")
        contBlockSize.setdefault(container, {})
        if row['data'] in [None, ""]:
            msg = "Failure in getBlocksAndSizeRucio function for container {}. Response: {}".format(container, row)
            logging.error(msg)
            contBlockSize[container] = None
            continue
        for item in parseNewLineJson(row['data']):
            # NOTE: we do not care about primary block location in Rucio
            contBlockSize[container][item['name']] = {"blockSize": item['bytes'], "locations": []}
    return contBlockSize
### NOTE: likely not going to be used for a while
def getContainerBlocksRucio(containers, rucioUrl, rucioToken, scope="cms"):
    """
    Provided a list of containers, find all their blocks.
    :param containers: list of container names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary key'ed by the datasets with a list of blocks
        (an empty list if the data-service failed to serve the request).
    """
    blocksByDset = {}
    if not containers:
        return blocksByDset
    headers = {"X-Rucio-Auth-Token": rucioToken}
    urls = ['{}/dids/{}/{}/dids'.format(rucioUrl, scope, cont) for cont in containers]
    logging.info("Executing %d requests against Rucio DIDs API for blocks in containers", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # reconstruct the container name back from the request url
        container = row['url'].split("/{}/".format(scope))[1]
        container = re.sub("/dids$", "", container, 1)
        blocksByDset.setdefault(container, [])
        if not row['data']:
            # previously this branch fell through and parseNewLineJson(None)
            # blew up with an AttributeError; skip the row instead
            logging.warning("Dataset: %s has no blocks in Rucio", container)
            continue
        for item in parseNewLineJson(row['data']):
            blocksByDset[container].append(item["name"])
    return blocksByDset
### NOTE: likely not going to be used for a while
def getBlockReplicasAndSizeRucio(datasets, rucioUrl, rucioToken, scope="cms"):
    """
    Given a list of datasets, find all their blocks with replicas
    available.
    :param datasets: list of dataset names
    :param rucioUrl: a string with the Rucio URL
    :param rucioToken: a string with the user rucio token
    :param scope: a string with the Rucio scope of our data
    :return: a dictionary in the form of:
    {"dataset":
        {"block":
            {"blockSize": 111, "locations": ["x", "y"]}
        }
    }
    NOTE: Value `None` is returned in case the data-service failed to serve a given request.
    """
    dsetBlockSize = {}
    if not datasets:
        return dsetBlockSize
    headers = {"X-Rucio-Auth-Token": rucioToken}
    # first, figure out their block names
    blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope=scope)
    urls = []
    for _dset, blocks in viewitems(blocksByDset):
        for block in blocks:
            urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
    # next, query the replicas API for the block location
    # this is going to be bloody expensive in terms of HTTP requests
    logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
    data = multi_getdata(urls, ckey(), cert(), headers=headers)
    for row in data:
        # extract the block name back out of the request URL
        block = row['url'].split("/{}/".format(scope))[1]
        block = unquote(re.sub("/datasets$", "", block, 1))
        container = block.split("#")[0]
        dsetBlockSize.setdefault(container, dict())
        if row['data'] is None:
            msg = "Failure in getBlockReplicasAndSizeRucio for container {} and block {}.".format(container, block)
            msg += " Response: {}".format(row)
            logging.error(msg)
            # flag the whole container as failed
            dsetBlockSize[container] = None
            continue
        if dsetBlockSize[container] is None:
            # then one of the block requests failed, skip the whole dataset
            continue
        thisBlockRSEs = []
        blockBytes = 0
        for item in parseNewLineJson(row['data']):
            blockBytes = item['bytes']
            if item['state'] == "AVAILABLE":
                thisBlockRSEs.append(item["rse"])
        # now we have the final block location
        if not blockBytes and not thisBlockRSEs:
            logging.warning("Block: %s has no replicas and no size", block)
        else:
            dsetBlockSize[container][block] = {"locations": thisBlockRSEs, "blockSize": blockBytes}
    return dsetBlockSize
from __future__ import print_function, division
# system modules
import os
import re
# WMCore modules
from WMCore.REST.Server import RESTFrontPage
class FrontPage(RESTFrontPage):
    """MicroService front page.

    MicroService provides only one web page, the front page. The page just
    loads the javascript user interface, complete with CSS and all JS
    code embedded into it.

    The JavaScript code performs all the app functionality via the REST
    interface defined by the :class:`~.Data` class.
    """

    def __init__(self, app, config, mount):
        """
        :arg app: reference to the application object.
        :arg config: reference to the configuration.
        :arg str mount: URL mount point."""
        mainroot = 'microservice'  # entry point in access URL
        # static content location can be overridden via environment variable
        wpath = os.getenv('MS_STATIC_ROOT', '')
        if not wpath:
            # derive the data directory from this module's install location;
            # "/xlib/" in the path indicates an external-library layout
            content = os.path.abspath(__file__).rsplit('/', 5)[0]
            xlib = (__file__.find("/xlib/") >= 0 and "x") or ""
            wpath = "%s/%sdata/" % (content, xlib)
        if not wpath.endswith('/'):
            wpath += '/'
        print(self.__class__.__name__, "static content: %s" % wpath)
        # each static root carries a regexp restricting which files may be served
        mdict = {"root": wpath, "rx": re.compile(r"^[a-z]+/[-a-z0-9]+\.(?:html)$")}
        tdict = {"root": wpath + "templates/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:html|tmpl)$")}
        jdict = {"root": wpath + "js/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:js)$")}
        cdict = {"root": wpath + "css/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\..*(?:css)$")}
        idict = {"root": wpath + "images/",
                 "rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:png|gif|jpg)$")}
        roots = {mainroot: mdict, "templates": tdict,
                 "js": jdict, "css": cdict, "images": idict}
        # location of frontpage in the root, e.g. microservice
        frontpage = "%s/templates/index.html" % mainroot
        RESTFrontPage.__init__(self, app, config, mount, frontpage, roots)
from __future__ import division, print_function
from future.utils import listvalues, listitems
from future import standard_library
standard_library.install_aliases()
# system modules
from operator import itemgetter
from pprint import pformat
from retry import retry
from copy import deepcopy
# WMCore modules
from Utils.IteratorTools import grouper
from WMCore.MicroService.DataStructs.DefaultStructs import TRANSFEROR_REPORT,\
TRANSFER_RECORD, TRANSFER_COUCH_DOC
from WMCore.MicroService.Tools.Common import (teraBytes, isRelVal, getPileupDocs)
from WMCore.MicroService.MSCore.MSCore import MSCore
from WMCore.MicroService.MSTransferor.RequestInfo import RequestInfo
from WMCore.MicroService.MSTransferor.DataStructs.RSEQuotas import RSEQuotas
from WMCore.Services.CRIC.CRIC import CRIC
from WMCore.Services.Rucio.RucioUtils import GROUPING_ALL
def newTransferRec(dataIn):
    """
    Build a fresh transfer record for a single piece of input data.
    :param dataIn: dictionary with information relevant to this transfer doc
    :return: a transfer record dictionary
    """
    # start from a deep copy of the template so records never share state
    record = deepcopy(TRANSFER_RECORD)
    record.update({"dataset": dataIn['name'],
                   "dataType": dataIn['type'],
                   "campaignName": dataIn['campaign']})
    return record
def newTransferDoc(reqName, transferRecords):
    """
    Assemble the transfer document that gets persisted in central CouchDB.
    :param reqName: string with the workflow name
    :param transferRecords: list of dictionaries with transfer records
    :return: a transfer document dictionary
    """
    transferDoc = dict(TRANSFER_COUCH_DOC)
    transferDoc.update({"workflowName": reqName,
                        "transfers": transferRecords})
    return transferDoc
class MSTransferor(MSCore):
"""
MSTransferor class provide whole logic behind
the transferor module.
"""
    def __init__(self, msConfig, logger=None):
        """
        Runs the basic setup and initialization for the MS Transferor module
        :param msConfig: microservice configuration dictionary
        :param logger: logger object (a new one is created by the base class if None)
        """
        super(MSTransferor, self).__init__(msConfig, logger=logger)
        # NOTE: the setdefault calls below must run before RSEQuotas is built,
        # since RSEQuotas consumes "minimumThreshold" from this very msConfig
        # minimum percentage completion for dataset/blocks subscribed
        self.msConfig.setdefault("minPercentCompletion", 99)
        # minimum available storage to consider a resource good for receiving data
        self.msConfig.setdefault("minimumThreshold", 1 * (1000 ** 4))  # 1TB
        # limit MSTransferor to this amount of requests per cycle
        self.msConfig.setdefault("limitRequestsPerCycle", 500)
        # Send warning messages for any data transfer above this threshold.
        # Set to negative to ignore.
        self.msConfig.setdefault("warningTransferThreshold", 100. * (1000 ** 4))  # 100TB
        # weight expression for the input replication rules
        self.msConfig.setdefault("rucioRuleWeight", 'ddm_quota')
        # Workflows with open running timeout are used for growing input dataset, thus
        # make a container level rule for the whole container whenever the open running
        # timeout is larger than what is configured (or the default of 7 days below)
        self.msConfig.setdefault("openRunning", 7 * 24 * 60 * 60)
        # define the pileup query to be executed through MSPileup
        self.pileupQuery = self.msConfig.get("pileupQuery",
                                             {"query": {"active": True}, "filters": ["expectedRSEs", "pileupName"]})
        quotaAccount = self.msConfig["rucioAccount"]
        self.rseQuotas = RSEQuotas(quotaAccount, self.msConfig["quotaUsage"],
                                   minimumThreshold=self.msConfig["minimumThreshold"],
                                   verbose=self.msConfig['verbose'], logger=logger)
        self.reqInfo = RequestInfo(self.msConfig, self.rucio, self.logger)
        self.cric = CRIC(logger=self.logger)
        # caches refreshed every cycle by updateCaches()
        self.pileupDocs = []
        self.uConfig = {}
        self.campaigns = {}
        self.psn2pnnMap = {}
        self.pnn2psnMap = {}
        # per-cycle counters of datasets/blocks subscribed
        self.dsetCounter = 0
        self.blockCounter = 0
        # service name used to route alerts via AlertManager
        self.alertServiceName = "ms-transferor"
    @retry(tries=3, delay=2, jitter=2)
    def updateCaches(self):
        """
        Fetch some data required for the transferor logic, e.g.:
         * account limits from Rucio
         * account usage from Rucio
         * unified configuration
         * all campaign configuration
         * PSN to PNN map from CRIC
        :raises RuntimeWarning: when any required data source fails; the
            @retry decorator then re-invokes this method (up to 3 tries).
        """
        self.logger.info("Updating RSE/PNN quota and usage")
        # quota must be fetched before usage: fetchStorageUsage computes
        # bytes_remaining against the limits stored by fetchStorageQuota
        self.rseQuotas.fetchStorageQuota(self.rucio)
        self.rseQuotas.fetchStorageUsage(self.rucio)
        self.rseQuotas.evaluateQuotaExceeded()
        if not self.rseQuotas.getNodeUsage():
            raise RuntimeWarning("Failed to fetch storage usage stats")
        self.logger.info("Updating all local caches...")
        # reset the per-cycle subscription counters
        self.dsetCounter = 0
        self.blockCounter = 0
        self.pileupDocs = getPileupDocs(self.msConfig['mspileupUrl'], self.pileupQuery)
        self.uConfig = self.unifiedConfig()
        campaigns = self.reqmgrAux.getCampaignConfig("ALL_DOCS")
        self.psn2pnnMap = self.cric.PSNtoPNNMap()
        self.pnn2psnMap = self.cric.PNNtoPSNMap()
        if not self.uConfig:
            raise RuntimeWarning("Failed to fetch the unified configuration")
        elif not campaigns:
            raise RuntimeWarning("Failed to fetch the campaign configurations")
        elif not self.psn2pnnMap:
            raise RuntimeWarning("Failed to fetch PSN x PNN map from CRIC")
        else:
            # let's make campaign look-up easier and more efficient
            self.campaigns = {}
            for camp in campaigns:
                self.campaigns[camp['CampaignName']] = camp
        self.rseQuotas.printQuotaSummary()
def execute(self, reqStatus):
"""
Executes the whole transferor logic
:param reqStatus: request status to process
:return:
"""
counterWorkflows = 0
counterFailedRequests = 0
counterProblematicRequests = 0
counterSuccessRequests = 0
summary = dict(TRANSFEROR_REPORT)
self.logger.info("Service set to process up to %s requests per cycle.",
self.msConfig["limitRequestsPerCycle"])
try:
requestRecords = self.getRequestRecords(reqStatus)
self.updateReportDict(summary, "total_num_requests", len(requestRecords))
self.logger.info("Retrieved %s requests.", len(requestRecords))
except Exception as err: # general error
requestRecords = []
msg = "Unknown exception while fetching requests from ReqMgr2. Error: %s", str(err)
self.logger.exception(msg)
self.updateReportDict(summary, "error", msg)
try:
self.updateCaches()
self.updateReportDict(summary, "total_num_active_pileups", len(self.pileupDocs))
self.updateReportDict(summary, "total_num_campaigns", len(self.campaigns))
self.updateReportDict(summary, "nodes_out_of_space", list(self.rseQuotas.getOutOfSpaceRSEs()))
except RuntimeWarning as ex:
msg = "All retries exhausted! Last error was: '%s'" % str(ex)
msg += "\nRetrying to update caches again in the next cycle."
self.logger.error(msg)
self.updateReportDict(summary, "error", msg)
return summary
except Exception as ex:
msg = "Unknown exception updating caches. Error: %s" % str(ex)
self.logger.exception(msg)
self.updateReportDict(summary, "error", msg)
return summary
# process all requests
for reqSlice in grouper(requestRecords, 100):
self.logger.info("Processing workflows from %d to %d.",
counterWorkflows + 1, counterWorkflows + len(reqSlice))
# execute data discovery
reqResults = self.reqInfo(reqSlice, self.pileupDocs)
self.logger.info("%d requests information completely processed.", len(reqResults))
for wflow in reqResults:
if not self.verifyCampaignExist(wflow):
counterProblematicRequests += 1
continue
if not self.passSecondaryCheck(wflow):
self.alertPUMisconfig(wflow.getName())
counterProblematicRequests += 1
continue
# find accepted RSEs for the workflow
rseList = self.getAcceptedRSEs(wflow)
# now check where input primary and parent blocks will need to go
self.checkDataLocation(wflow, rseList)
try:
success, transfers = self.makeTransferRequest(wflow, rseList)
except Exception as ex:
success = False
self.alertUnknownTransferError(wflow.getName())
msg = "Unknown exception while making transfer request for %s " % wflow.getName()
msg = "\tError: %s" % str(ex)
self.logger.exception(msg)
if success:
# then create a document in ReqMgr Aux DB
self.logger.info("Transfer requests successful for %s. Summary: %s",
wflow.getName(), pformat(transfers))
if self.createTransferDoc(wflow.getName(), transfers):
self.logger.info("Transfer document successfully created in CouchDB for: %s", wflow.getName())
# then move this request to staging status
self.change(wflow.getName(), 'staging', self.__class__.__name__)
counterSuccessRequests += 1
else:
counterFailedRequests += 1
self.alertTransferCouchDBError(wflow.getName())
else:
counterFailedRequests += 1
# it can go slightly beyond the limit. It's evaluated for every slice
if counterSuccessRequests >= self.msConfig["limitRequestsPerCycle"]:
msg = "Transferor succeeded acting on %d workflows in this cycle. " % counterSuccessRequests
msg += "Which exceeds the configuration limit set to: %s" % self.msConfig["limitRequestsPerCycle"]
self.logger.info(msg)
break
counterWorkflows += len(reqSlice)
self.logger.info("Summary for this cycle is:")
self.logger.info(" * there were %d problematic requests;", counterProblematicRequests)
self.logger.info(" * there were %d failed requests;", counterFailedRequests)
self.logger.info(" * there were %d successful requests;", counterSuccessRequests)
self.logger.info(" * a total of %d datasets were subscribed;", self.dsetCounter)
self.logger.info(" * a total of %d blocks were subscribed.", self.blockCounter)
self.updateReportDict(summary, "success_request_transition", counterSuccessRequests)
self.updateReportDict(summary, "failed_request_transition", counterFailedRequests)
self.updateReportDict(summary, "problematic_requests", counterProblematicRequests)
self.updateReportDict(summary, "num_datasets_subscribed", self.dsetCounter)
self.updateReportDict(summary, "num_blocks_subscribed", self.blockCounter)
self.updateReportDict(summary, "nodes_out_of_space", list(self.rseQuotas.getOutOfSpaceRSEs()))
return summary
def getRequestRecords(self, reqStatus):
"""
Queries ReqMgr2 for requests in a given status, sort them by priority
and return a subset of each request with important information for the
data placement algorithm.
"""
self.logger.info("Fetching requests in status: %s", reqStatus)
# get requests from ReqMgr2 data-service for given status
reqData = self.reqmgr2.getRequestByStatus([reqStatus], detail=True)
# we need to first put these requests in order of priority, as done for GQ...
orderedRequests = []
for requests in reqData:
orderedRequests = listvalues(requests)
orderedRequests.sort(key=itemgetter('RequestPriority'), reverse=True)
return orderedRequests
def verifyCampaignExist(self, wflow):
"""
Check whether there is a campaign for the primary dataset.
:param wflow: a workflow object
:return: True if campaigns exist, False otherwise
"""
for dataIn in wflow.getDataCampaignMap():
if dataIn["type"] == "primary":
if dataIn['campaign'] not in self.campaigns:
msg = "Workflow: %s has to transfer dataset: %s under the campaign: %s. "
msg += "This campaign does not exist and needs to be created. Skipping this workflow!"
self.logger.warning(msg, wflow.getName(), dataIn['name'], dataIn['campaign'])
return False
return True
def passSecondaryCheck(self, wflow):
"""
Check if the workflow uses active pileup and with valid location.
:param wflow: workflow object
:return: boolean whether the workflow is good to go or not
"""
pileupInput = wflow.getSecondarySummary()
if not pileupInput:
# nothing to be done here
return True
for puName, puData in pileupInput.items():
if puData['locations'] == []:
msg = f"Workflow {wflow.getName()} requires pileup dataset {puName} "
msg += "which is either not active or does not exist in MSPileup."
self.logger.warning(msg)
return False
return True
def getAcceptedRSEs(self, wflow):
"""
Given a workflow object, find it's final accepted list of
RSEs for input data placement. This is based on:
* TrustSitelists and TrustPUSitelists;
* Workflow SiteWhitelist and SiteBlacklist
* Pileup location(s)
Note that it does NOT account for RSEs out of quota.
:param wflow: wflow object
:return: a list with unique RSE names
"""
# workflow level site lists. In case there is no pileup or SecAAA=True
wflowRSEs = self._getPNNsFromPSNs(wflow.getSitelist())
if wflow.getPileupDatasets() and not wflow.getReqParam("TrustPUSitelists"):
# otherwise, data needs to be placed where pileup is
wflowRSEs = wflowRSEs & wflow.getPURSElist()
return list(wflowRSEs)
def checkDataLocation(self, wflow, rseList):
"""
Check which data is already in place (according to the site lists
and pileup data location) and remove them from the data placement
if already available anywhere.
If workflow has XRootD/AAA enabled, data location can be outside of
the SiteWhitelist.
:param wflow: workflow object
:param rseList: list of RSE names allowed for a given workflow
:return: None
"""
if not wflow.getInputDataset():
return
primAAA = wflow.getReqParam("TrustSitelists")
secAAA = wflow.getReqParam("TrustPUSitelists")
msg = f"Checking data location for request: {wflow.getName()}, "
msg += f"TrustSitelists: {primAAA}, TrustPUSitelists: {secAAA}, "
msg += f"and final accepted RSEs: {rseList}"
self.logger.info(msg)
for methodName in ("getPrimaryBlocks", "getParentBlocks"):
inputBlocks = getattr(wflow, methodName)()
self.logger.info("Request %s has %d initial blocks from %s",
wflow.getName(), len(inputBlocks), methodName)
for block, blockDict in listitems(inputBlocks): # dict can change size here
blockLocation = self._diskPNNs(blockDict['locations'])
if not blockLocation:
self.logger.info("Primary/parent block %s not available in any disk storage", block)
elif primAAA:
msg = "Primary/parent block %s already in place (via AAA): %s" % (block, blockLocation)
self.logger.info(msg)
inputBlocks.pop(block)
else:
commonLocation = set(blockLocation) & rseList
if commonLocation:
self.logger.info("Primary/parent block %s already in place: %s", block, commonLocation)
inputBlocks.pop(block)
else:
self.logger.info("block: %s will need data placement!!!", block)
self.logger.info("Request %s has %d final blocks from %s",
wflow.getName(), len(getattr(wflow, methodName)()), methodName)
    def makeTransferRequest(self, wflow, rseList):
        """
        Checks which input data has to be transferred, select the final destination if needed,
        create the transfer record to be stored in Couch, and create the DM placement request.
        This method does the following:
          1. return if there is no workflow data to be transferred
          2. check if the data input campaign is in the database, skip if not
          3. _getValidSites: using the workflow site lists and the campaign configuration,
             find a common list of sites (converted to PNNs). If the PNN is out of quota,
             it's also removed from this list
          4. create the transfer record dictionary
          5. for every final node
             5.1. if it's a pileup dataset, pick a random node and subscribe the whole container
             5.2. else, retrieve chunks of blocks to be subscribed (evenly distributed)
             5.3. update node usage with the amount of data subscribed
          6. re-evaluate nodes with quota exceeded
          7. return the transfer record, with a list of transfer IDs
        :param wflow: workflow object
        :param rseList: list of RSE names allowed for a given workflow
        :return: boolean whether it succeeded or not, and a list of transfer records
        """
        response = []
        success = True
        # nothing to transfer at all
        if not (wflow.getParentBlocks() or wflow.getPrimaryBlocks()):
            self.logger.info("Request %s does not have any further data to transfer", wflow.getName())
            return success, response
        self.logger.info("Handling data subscriptions for request: %s", wflow.getName())
        for dataIn in wflow.getDataCampaignMap():
            if dataIn["type"] == "parent":
                msg = "Skipping 'parent' data placement (done with the 'primary' data), for: %s" % dataIn
                self.logger.info(msg)
                continue
            elif dataIn["type"] == "secondary":
                # already performed by MSPileup
                continue
            if not isRelVal(wflow.data):
                # enforce RSE quota (RelVal workflows are exempt)
                rses = list(set(rseList) & self.rseQuotas.getAvailableRSEs())
            else:
                rses = rseList
            if not rses:
                msg = f"Workflow: {wflow.getName()} could have data placed at: {rseList}, "
                msg += "but those are all out of quota. Skipping it till next cycle"
                self.logger.warning(msg)
                return False, response
            # create a transfer record data structure
            transRec = newTransferRec(dataIn)
            # figure out dids, number of copies and which grouping to use
            dids, didsSize = wflow.getInputData()
            grouping = wflow.getRucioGrouping()
            copies = wflow.getReplicaCopies()
            if not dids:
                # no valid files in any blocks, it will likely fail in global workqueue
                self.logger.warning(" found 0 primary/parent blocks for dataset: %s, moving on...", dataIn['name'])
                return success, response
            success, transferId = self.makeTransferRucio(wflow, dataIn, dids, didsSize,
                                                         grouping, copies, rses)
            if not success:
                # stop any other data placement for this workflow
                msg = "There were failures transferring data for workflow: %s. Will retry again later."
                self.logger.warning(msg, wflow.getName())
                break
            if transferId:
                # rule creation may return multiple rule ids (pre-existing rules)
                if isinstance(transferId, (set, list)):
                    transRec['transferIDs'].update(transferId)
                else:
                    transRec['transferIDs'].add(transferId)
            # and update some instance caches
            if grouping == GROUPING_ALL:
                self.dsetCounter += 1
            else:
                self.blockCounter += len(dids)
            transRec['transferIDs'] = list(transRec['transferIDs'])
            response.append(transRec)
        return success, response
    def makeTransferRucio(self, wflow, dataIn, dids, dataSize, grouping, copies, nodes):
        """
        Creates a Rucio replication rule
        :param wflow: the workflow object
        :param dataIn: short summary of the data to be placed
        :param dids: a list of the DIDs to be added to the rule
        :param dataSize: amount of data being placed by this rule
        :param grouping: whether blocks need to be placed altogether (ALL)
            or if the can be scattered around (DATASET).
        :param copies: integer with the number of copies to use in the rule
        :param nodes: list of nodes/RSE
        :return: a boolean flagging whether it succeeded or not, and the rule id
            (a set of rule ids; empty in dry-run mode or on failure)
        """
        success, transferId = True, set()
        ruleAttrs = {'copies': copies,
                     'activity': 'Production Input',
                     'lifetime': self.msConfig['rulesLifetime'],
                     'account': self.msConfig['rucioAccount'],
                     'grouping': grouping,
                     'weight': self.msConfig['rucioRuleWeight'],
                     'meta': {'workflow_group': wflow.getWorkflowGroup()},
                     'comment': 'WMCore MSTransferor input data placement'}
        # any of the listed RSEs may satisfy the rule
        rseExpr = "|".join(nodes)
        if self.msConfig.get('enableDataTransfer', True):
            # Force request-only subscription
            # to any data transfer going above some threshold (do not auto-approve)
            aboveWarningThreshold = (self.msConfig.get('warningTransferThreshold') > 0. and
                                     dataSize > self.msConfig.get('warningTransferThreshold'))
            # Then make the data subscription, for real!!!
            self.logger.info("Creating rule for workflow %s with %d DIDs in container %s, RSEs: %s, grouping: %s",
                             wflow.getName(), len(dids), dataIn['name'], rseExpr, grouping)
            try:
                res = self.rucio.createReplicationRule(dids, rseExpr, **ruleAttrs)
            except Exception as exc:
                msg = "Hit a bad exception while creating replication rules for DID: %s. Error: %s"
                self.logger.error(msg, dids, str(exc))
                success = False
            else:
                if res:
                    # it could be that some of the DIDs already had such rule in
                    # place, so we might be retrieving a bunch of rule ids instead of
                    # a single one
                    self.logger.info("Rules successful created for %s : %s", dataIn['name'], res)
                    transferId.update(res)
                    # send an alert, if needed
                    self.alertLargeInputData(aboveWarningThreshold, transferId, wflow.getName(), dataSize, dataIn)
                else:
                    self.logger.error("Failed to create rule for %s, will retry later", dids)
                    success = False
        else:
            # data transfers disabled by configuration: only log what would be done
            msg = "DRY-RUN: making Rucio rule for workflow: %s, dids: %s, rse: %s, kwargs: %s"
            self.logger.info(msg, wflow.getName(), dids, rseExpr, ruleAttrs)
        return success, transferId
def alertPUMisconfig(self, workflowName):
"""
Send alert to Prometheus with PU misconfiguration error
"""
alertName = "{}: PU misconfiguration error. Workflow: {}".format(self.alertServiceName,
workflowName)
alertSeverity = "high"
alertSummary = "[MSTransferor] Workflow cannot proceed due to some PU misconfiguration."
alertDescription = "Workflow: {} could not proceed due to some PU misconfiguration,".format(workflowName)
alertDescription += "so it will be skipped."
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
self.logger.critical(alertDescription)
def alertUnknownTransferError(self, workflowName):
"""
Send alert to Prometheus with unknown transfer error
"""
alertName = "{}: Transfer request error. Workflow: {}".format(self.alertServiceName,
workflowName)
alertSeverity = "high"
alertSummary = "[MSTransferor] Unknown exception while making transfer request."
alertDescription = "Unknown exception while making Transfer request for workflow: {}".format(workflowName)
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
def alertTransferCouchDBError(self, workflowName):
"""
Send alert to Prometheus with CouchDB transfer error
"""
alertName = "{}: Failed to create a transfer document in CouchDB for workflow: {}".format(self.alertServiceName,
workflowName)
alertSeverity = "high"
alertSummary = "[MSTransferor] Transfer document could not be created in CouchDB."
alertDescription = "Workflow: {}, failed request due to error posting to CouchDB".format(workflowName)
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
self.logger.warning(alertDescription)
def alertLargeInputData(self, aboveWarningThreshold, transferId, wflowName, dataSize, dataIn):
"""
Evaluates whether the amount of data placed is too big, if so, send an alert
notification to a few persons
:param aboveWarningThreshold: boolean flag saying if the thresholds was exceeded or not
:param transferId: rule/transfer request id
:param wflowName: name of the workflow
:param dataSize: total amount of data subscribed
:param dataIn: short summary of the workflow data
"""
# Warn about data transfer subscriptions going above some threshold
if aboveWarningThreshold:
alertName = "{}: input data transfer over threshold: {}".format(self.alertServiceName,
wflowName)
alertSeverity = "high"
alertSummary = "[MS] Large pending data transfer under request id: {}".format(transferId)
alertDescription = "Workflow: {} has a large amount of ".format(wflowName)
alertDescription += "data subscribed: {} TB, ".format(teraBytes(dataSize))
alertDescription += "for {} data: {}.""".format(dataIn['type'], dataIn['name'])
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
self.logger.warning(alertDescription)
def _getValidSites(self, wflow, dataIn):
"""
Given a workflow object and the data short summary, find out
the Campaign name, the workflow SiteWhitelist, map the PSNs to
PNNs and finally remove PNNs without space
can still receive data
:param wflow: the workflow object
:param dataIn: short summary of data to be transferred
:return: a unique and ordered list of PNNs to take data
"""
campConfig = self.campaigns[dataIn['campaign']]
psns = wflow.getSitelist()
if dataIn["type"] == "primary":
if campConfig['SiteWhiteList']:
psns = set(psns) & set(campConfig['SiteWhiteList'])
if campConfig['SiteBlackList']:
psns = set(psns) - set(campConfig['SiteBlackList'])
self.logger.info(" final list of PSNs to be use: %s", psns)
pnns = self._getPNNsFromPSNs(psns)
if isRelVal(wflow.data):
self.logger.info("RelVal workflow '%s' ignores sites out of quota", wflow.getName())
return list(pnns)
self.logger.info("List of out-of-space RSEs dropped for '%s' is: %s",
wflow.getName(), pnns & self.rseQuotas.getOutOfSpaceRSEs())
return list(pnns & self.rseQuotas.getAvailableRSEs())
def createTransferDoc(self, reqName, transferRecords):
"""
Enrich the records returned from the data placement logic, wrap them up
in a single document and post it to CouchDB
:param reqName: the workflow name
:param transferRecords: list of dictionaries records, or empty if no input at all
:return: True if operation is successful, else False
"""
doc = newTransferDoc(reqName, transferRecords)
# Use the update/put method, otherwise it will fail if the document already exists
if self.reqmgrAux.updateTransferInfo(reqName, doc):
return True
self.logger.error("Failed to create transfer document in CouchDB. Will retry again later.")
return False
def _getPNNsFromPSNs(self, psnList):
"""
Given a list/set of PSNs, return a set of valid PNNs.
Note that T3, Tape and a few other PNNs are never returned.
"""
pnns = set()
for psn in psnList:
for pnn in self.psn2pnnMap.get(psn, []):
if pnn == "T2_CH_CERNBOX" or pnn.startswith("T3_"):
pass
elif pnn.endswith("_Tape") or pnn.endswith("_MSS") or pnn.endswith("_Export"):
pass
else:
pnns.add(pnn)
return pnns
def _getPSNsFromPNNs(self, pnnList):
"""
Given a list/set of PNNs, return a set of valid PSNs.
Note that T3 sites are never returned.
"""
psns = set()
for pnn in pnnList:
for psn in self.pnn2psnMap.get(pnn, []):
if psn.startswith("T3_"):
pass
else:
psns.add(psn)
return psns
def _diskPNNs(self, pnnList):
"""
Provided a list of PNN locations, return another list of
PNNs without mass storage and T3 sites
:param pnnList: list of PNN strings
:return: a set of strings with filtered out PNNs
"""
diskPNNs = set()
for pnn in pnnList:
if pnn == "T2_CH_CERNBOX" or pnn.startswith("T3_"):
pass
elif pnn.endswith("_Tape") or pnn.endswith("_MSS") or pnn.endswith("_Export"):
pass
else:
diskPNNs.add(pnn)
return diskPNNs | /reqmgr2ms-transferor-2.2.4rc2.tar.gz/reqmgr2ms-transferor-2.2.4rc2/src/python/WMCore/MicroService/MSTransferor/MSTransferor.py | 0.607547 | 0.15059 | MSTransferor.py | pypi |
from __future__ import division, print_function
from builtins import str as newstr, bytes, object
from future.utils import viewitems
from WMCore.MicroService.Tools.Common import getMSLogger, gigaBytes, teraBytes
class RSEQuotas(object):
"""
Class which represents a list of RSEs, their quota and
their storage usage
"""
    def __init__(self, dataAcct, quotaFraction, **kwargs):
        """
        Executes a basic setup, including proper logging.
        :param dataAcct: string with the Rucio account
        :param quotaFraction: float point number representing the fraction of the quota
        :param kwargs: the supported keyword arguments are:
          minimumThreshold: integer value defining the minimum available space required
            (required key; a missing value raises KeyError)
          verbose: logger verbosity
          logger: logger object
        """
        self.dataAcct = dataAcct
        self.quotaFraction = quotaFraction
        self.minimumSpace = kwargs["minimumThreshold"]
        self.logger = getMSLogger(kwargs.get("verbose"), kwargs.get("logger"))
        msg = "RSEQuotas started with parameters: dataAcct=%s, quotaFraction=%s, "
        msg += "minimumThreshold=%s GB"
        self.logger.info(msg, dataAcct, quotaFraction, gigaBytes(self.minimumSpace))
        # caches filled by fetchStorageQuota/fetchStorageUsage/evaluateQuotaExceeded
        self.nodeUsage = {}
        self.availableRSEs = set()
        self.outOfSpaceNodes = set()
def __str__(self):
"""
Write out useful information for this object
:return: a stringified dictionary
"""
res = {'dataAcct': self.dataAcct, 'quotaFraction': self.quotaFraction,
'minimumSpace': self.minimumSpace}
return str(res)
    def getNodeUsage(self):
        """
        Return the dictionary of RSEs and their storage statistics
        (quota, bytes used, bytes remaining and usable quota).
        """
        return self.nodeUsage
    def getAvailableRSEs(self):
        """
        Return the set of RSE/PNNs that still have enough storage
        available to receive data.
        """
        return self.availableRSEs
    def getOutOfSpaceRSEs(self):
        """
        Return the set of out-of-space RSE/PNNs.
        """
        return self.outOfSpaceNodes
def fetchStorageQuota(self, dataSvcObj):
"""
Fetch the storage quota/limit for a given Rucio account.
:param dataSvcObj: object instance for the Rucio data service
:return: create an instance cache structure to keep track of quota
and available storage. The structure is as follows:
{"pnn_name": {"quota": quota in bytes for the rucio account,
"bytes_limit": total space for the account/group,
"bytes": amount of bytes currently used/archived,
"bytes_remaining": space remaining for the acct/group,
"quota_avail": a fraction of the quota that we will use}
"""
self.nodeUsage.clear()
response = dataSvcObj.getAccountLimits(self.dataAcct)
for rse, quota in viewitems(response):
if rse.endswith("_Tape") or rse.endswith("_Export"):
continue
self.nodeUsage.setdefault(rse, {})
self.nodeUsage[rse] = dict(quota=int(quota),
bytes_limit=int(quota),
bytes=0,
bytes_remaining=int(quota), # FIXME: always 0
quota_avail=0)
self.logger.info("Storage quota filled from Rucio")
def fetchStorageUsage(self, dataSvcObj):
"""
Fetch the storage usage from Rucio, which will then
be used as part of the data placement mechanism.
Also calculate the available quota - given the configurable quota
fraction - and mark RSEs with less than 1TB available as NOT usable.
:param dataSvcObj: object instance for the data service
Keys definition is:
* quota: the Rucio account limit
* bytes_limit: the account quota from Rucio
* bytes: data volume placed by Rucio
* bytes_remaining: storage available for our account/group
* quota_avail: space left (in bytes) that we can use for data placement
:return: update our cache in place with up-to-date values, in the format of:
{"pnn_name": {"bytes_limit": total space for the account/group,
"bytes": amount of bytes currently used/archived,
"bytes_remaining": space remaining for the acct/group}
"""
self.logger.info("Using Rucio for storage usage, with acct: %s", self.dataAcct)
for item in dataSvcObj.getAccountUsage(self.dataAcct):
if item['rse'] not in self.nodeUsage:
self.logger.warning("Rucio RSE: %s has data usage but no quota available.", item['rse'])
continue
# bytes_limit is always 0, so skip it and use whatever came from the limits call
# bytes_remaining is always negative, so calculate it based on the limits
quota = self.nodeUsage[item['rse']]['quota']
self.nodeUsage[item['rse']].update({'bytes': item['bytes'],
'bytes_remaining': quota - item['bytes']})
def evaluateQuotaExceeded(self):
"""
Goes through every single site, their quota and their remaining
storage; and mark those with less than X TB available (1TB at the
moment) as not eligible to receive data
:return: updates instance structures in place
"""
self.availableRSEs.clear()
self.outOfSpaceNodes.clear()
# given a configurable sub-fraction of our quota, recalculate how much storage is left
for rse, info in viewitems(self.nodeUsage):
quotaAvail = info['quota'] * self.quotaFraction
info['quota_avail'] = min(quotaAvail, info['bytes_remaining'])
if info['quota_avail'] < self.minimumSpace:
self.outOfSpaceNodes.add(rse)
else:
self.availableRSEs.add(rse)
self.logger.info("Currently %d nodes are out of space.", len(self.outOfSpaceNodes))
def printQuotaSummary(self):
"""
Print a summary of the current quotas, space usage and space available
"""
self.logger.info("Summary of the current quotas in Terabytes:")
for node in sorted(self.nodeUsage.keys()):
msg = " %s:\t\tbytes_limit: %.2f, bytes_used: %.2f, bytes_remaining: %.2f, "
msg += "quota: %.2f, quota_avail: %.2f"
self.logger.info(msg, node, teraBytes(self.nodeUsage[node]['bytes_limit']),
teraBytes(self.nodeUsage[node]['bytes']),
teraBytes(self.nodeUsage[node]['bytes_remaining']),
teraBytes(self.nodeUsage[node]['quota']),
teraBytes(self.nodeUsage[node]['quota_avail']))
self.logger.info("List of RSE's out of quota: %s", self.outOfSpaceNodes)
def updateNodeUsage(self, node, dataSize):
"""
Provided a RSE/PNN name and the data size, in bytes, update the node
storage usage by subtracting it from the current available quota.
If it gets a list of nodes, the same dataSize is accounted for all
of them.
:param node: string with the PNN/RSE
:param dataSize: integer with the amount of bytes allocated
:return: nothing. updates nodeUsage cache
"""
if isinstance(node, (newstr, bytes)):
node = [node]
if not isinstance(dataSize, int):
self.logger.error("dataSize needs to be integer, not '%s'!", type(dataSize))
for rse in node:
self.nodeUsage[rse]['quota_avail'] -= dataSize | /reqmgr2ms-transferor-2.2.4rc2.tar.gz/reqmgr2ms-transferor-2.2.4rc2/src/python/WMCore/MicroService/MSTransferor/DataStructs/RSEQuotas.py | 0.78572 | 0.421552 | RSEQuotas.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.