from builtins import range
from WMCore.DataStructs.Run import Run
class Mask(dict):
"""
_Mask_
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
self.inclusive = True
self.setdefault("inclusivemask", True)
self.setdefault("FirstEvent", None)
self.setdefault("LastEvent", None)
self.setdefault("FirstLumi", None)
self.setdefault("LastLumi", None)
self.setdefault("FirstRun", None)
self.setdefault("LastRun", None)
self.setdefault("runAndLumis", {})
def setMaxAndSkipEvents(self, maxEvents, skipEvents):
"""
_setMaxAndSkipEvents_
Set FirstEvent & LastEvent fields as max & skip events
"""
self['FirstEvent'] = skipEvents
if maxEvents is not None:
self['LastEvent'] = skipEvents + maxEvents
return
def setMaxAndSkipLumis(self, maxLumis, skipLumi):
"""
        _setMaxAndSkipLumis_
        Set the maximum number of lumi sections and the starting point
"""
self['FirstLumi'] = skipLumi
self['LastLumi'] = skipLumi + maxLumis
return
def setMaxAndSkipRuns(self, maxRuns, skipRun):
"""
        _setMaxAndSkipRuns_
        Set the maximum number of runs and the starting point
"""
self['FirstRun'] = skipRun
self['LastRun'] = skipRun + maxRuns
return
def getMaxEvents(self):
"""
_getMaxEvents_
return maxevents setting
"""
if self['LastEvent'] is None or self['FirstEvent'] is None:
return None
return self['LastEvent'] - self['FirstEvent'] + 1
def getMax(self, keyType=None):
"""
_getMax_
        returns the maximum number of runs/events/lumis for the given key type string
"""
if 'First%s' % (keyType) not in self:
return None
if self['First%s' % (keyType)] is None or self['Last%s' % (keyType)] is None:
return None
return self['Last%s' % (keyType)] - self['First%s' % (keyType)] + 1
def addRun(self, run):
"""
_addRun_
Add a run object
"""
run.lumis.sort()
firstLumi = run.lumis[0]
lastLumi = run.lumis[0]
for lumi in run.lumis:
if lumi <= lastLumi + 1:
lastLumi = lumi
else:
self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
firstLumi = lumi
lastLumi = lumi
self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
return
def addRunWithLumiRanges(self, run, lumiList):
"""
_addRunWithLumiRanges_
Add to runAndLumis with call signature
        addRunWithLumiRanges(run=run, lumiList=[[start1, end1], [start2, end2], ...])
"""
self['runAndLumis'][run] = lumiList
return
def addRunAndLumis(self, run, lumis=None):
"""
_addRunAndLumis_
Add runs and lumis directly
TODO: The name of this function is a little misleading. If you pass a list of lumis
it ignores the content of the list and adds a range based on the max/min in
the list. Missing lumis in the list are ignored.
NOTE: If the new run/lumi range overlaps with the pre-existing lumi ranges in the
mask, no attempt is made to merge these together. This can result in a mask
with duplicate lumis.
"""
lumis = lumis or []
if not isinstance(lumis, list):
lumis = list(lumis)
if run not in self['runAndLumis']:
self['runAndLumis'][run] = []
self['runAndLumis'][run].append([min(lumis), max(lumis)])
return
def getRunAndLumis(self):
"""
_getRunAndLumis_
Return list of active runs and lumis
"""
return self['runAndLumis']
def runLumiInMask(self, run, lumi):
"""
_runLumiInMask_
See if a particular runLumi is in the mask
"""
if self['runAndLumis'] == {}:
# Empty dictionary
# ALWAYS TRUE
return True
if run not in self['runAndLumis']:
return False
for pair in self['runAndLumis'][run]:
# Go through each max and min pair
if pair[0] <= lumi and pair[1] >= lumi:
# Then the lumi is bracketed
return True
return False
def filterRunLumisByMask(self, runs):
"""
_filterRunLumisByMask_
Pass a Mask a list of run objects, get back a list of
run objects that correspond to the actual mask allowed values
"""
if self['runAndLumis'] == {}:
# Empty dictionary
# ALWAYS TRUE
return runs
runDict = {}
for r in runs:
if r.run in runDict:
runDict[r.run].extendLumis(r.lumis)
else:
runDict[r.run] = r
maskRuns = set(self["runAndLumis"].keys())
passedRuns = set([r.run for r in runs])
filteredRuns = maskRuns.intersection(passedRuns)
newRuns = set()
for runNumber in filteredRuns:
maskLumis = set()
for pair in self["runAndLumis"][runNumber]:
if pair[0] == pair[1]:
maskLumis.add(pair[0])
else:
maskLumis = maskLumis.union(list(range(pair[0], pair[1] + 1)))
filteredLumis = set(runDict[runNumber].lumis).intersection(maskLumis)
if len(filteredLumis) > 0:
filteredLumiEvents = [(lumi, runDict[runNumber].getEventsByLumi(lumi)) for lumi in filteredLumis]
newRuns.add(Run(runNumber, *filteredLumiEvents))
        return newRuns
# End of src/python/WMCore/DataStructs/Mask.py
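# --------------------------------------------------------------------------
# Illustrative usage sketch for the Mask class above (not part of the
# original module). It assumes WMCore.DataStructs is importable and that
# Run(runNumber, *lumis) accepts plain lumi numbers; all run/lumi values
# below are made up.
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.Mask import Mask

def maskExample():
    mask = Mask()
    # Register lumi range 1-5 for run 100000 and 10-12 for run 100001
    mask.addRunAndLumis(100000, lumis=[1, 5])
    mask.addRunWithLumiRanges(100001, lumiList=[[10, 12]])
    print(mask.runLumiInMask(100000, 3))   # True, lumi 3 is bracketed by [1, 5]
    print(mask.runLumiInMask(100000, 7))   # False, outside every range
    # Filter Run objects down to the run/lumis allowed by the mask
    runs = [Run(100000, *list(range(1, 9))), Run(999999, 1, 2)]
    for run in mask.filterRunLumisByMask(runs):
        print(run.run, sorted(run.lumis))  # only run 100000 with lumis 1-5 survives

if __name__ == "__main__":
    maskExample()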
from __future__ import absolute_import, division, print_function
from future.utils import listitems
import sys
import hashlib
import time
from functools import total_ordering
from Utils.Utilities import encodeUnicodeToBytes
from WMCore.DataStructs.WMObject import WMObject
@total_ordering
class WorkUnit(WMObject, dict):
"""
_WorkUnit_
Data object that contains details for a single work unit
corresponding to tables workunit and frl_workunit_assoc
"""
fieldsToCopy = ['taskid', 'retry_count', 'last_unit_count', 'last_submit_time', 'status', 'firstevent',
'lastevent', 'fileid']
fieldsForInfo = fieldsToCopy + ['run_lumi']
def __init__(self, taskID=None, retryCount=0, lastUnitCount=None, lastSubmitTime=int(time.time()),
status=0, firstEvent=1, lastEvent=sys.maxsize, fileid=None, runLumi=None):
super(WorkUnit, self).__init__(self)
self.setdefault('taskid', taskID)
self.setdefault('retry_count', retryCount)
self.setdefault('last_unit_count', lastUnitCount)
self.setdefault('last_submit_time', lastSubmitTime)
self.setdefault('status', status)
self.setdefault('firstevent', firstEvent)
self.setdefault('lastevent', lastEvent)
self.setdefault('fileid', fileid)
self.setdefault('run_lumi', runLumi)
def __lt__(self, rhs):
"""
Compare work units in task id, run, lumi, first event, last event
"""
if self['taskid'] != rhs['taskid']:
return self['taskid'] < rhs['taskid']
if self['run_lumi'].run != rhs['run_lumi'].run:
return self['run_lumi'].run < rhs['run_lumi'].run
if self['run_lumi'].lumis != rhs['run_lumi'].lumis:
return self['run_lumi'].lumis < rhs['run_lumi'].lumis
        if self['firstevent'] != rhs['firstevent']:
            return self['firstevent'] < rhs['firstevent']
        return self['lastevent'] < rhs['lastevent']
def __eq__(self, rhs):
"""
        Work units are equal if they have the same task, run, lumis, and event range
        """
        return (self['taskid'] == rhs['taskid'] and self['run_lumi'].run == rhs['run_lumi'].run and
                self['run_lumi'].lumis == rhs['run_lumi'].lumis and self['firstevent'] == rhs['firstevent'] and
self['lastevent'] == rhs['lastevent'])
def __hash__(self):
"""
Hash function for this dict.
"""
# Generate an immutable sorted string representing this object
# NOTE: the run object needs to be hashed
immutableSelf = []
for keyName in sorted(self):
if keyName == "run_lumi":
immutableSelf.append((keyName, hash(self[keyName])))
else:
immutableSelf.append((keyName, self[keyName]))
hashValue = hashlib.sha1(encodeUnicodeToBytes(str(immutableSelf)))
return int(hashValue.hexdigest()[:15], 16)
def json(self, thunker=None):
"""
_json_
Serialize the object. Only copy select fields and construct one new field.
"""
jsonDict = {k: self[k] for k in WorkUnit.fieldsToCopy}
jsonDict["run_lumi"] = {"run_number": self['run_lumi'].run, "lumis": self['run_lumi'].lumis}
return jsonDict
def __to_json__(self, thunker=None):
"""
__to_json__
This is the standard way we jsonize other objects.
Included here so we have a uniform method.
"""
return self.json(thunker)
def getInfo(self):
"""
Returns: tuple of parameters for the work unit
"""
        return tuple(self[x] for x in WorkUnit.fieldsForInfo)
# End of src/python/WMCore/DataStructs/WorkUnit.py
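# --------------------------------------------------------------------------
# Illustrative usage sketch for WorkUnit (not part of the original module).
# It assumes WMCore.DataStructs is importable; the task ids, file ids and
# run/lumi values are invented.
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WorkUnit import WorkUnit

def workUnitExample():
    wu1 = WorkUnit(taskID=1, fileid=10, runLumi=Run(1, 1))
    wu2 = WorkUnit(taskID=2, fileid=11, runLumi=Run(1, 2))
    print(wu1 < wu2)                        # True, ordering starts with the task id
    print(wu1 == wu2)                       # False, different task and lumis
    print(sorted([wu2, wu1])[0]['taskid'])  # 1
    print(wu1.json()['run_lumi'])           # {'run_number': 1, 'lumis': [1]}

if __name__ == "__main__":
    workUnitExample()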
from builtins import str, bytes
__all__ = []
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WMObject import WMObject
class File(WMObject, dict):
"""
_File_
Data object that contains details for a single file
TODO
- use the decorator `from functools import total_ordering` after
dropping support for python 2.6
- then, drop __ne__, __le__, __gt__, __ge__
"""
def __init__(self, lfn="", size=0, events=0, checksums=None,
parents=None, locations=None, merged=False):
dict.__init__(self)
checksums = checksums or {}
self.setdefault("lfn", lfn)
self.setdefault("size", size)
self.setdefault("events", events)
self.setdefault("checksums", checksums)
self.setdefault('runs', set())
self.setdefault('merged', merged)
self.setdefault('last_event', 0)
self.setdefault('first_event', 0)
if locations is None:
self.setdefault("locations", set())
else:
self.setdefault("locations", locations)
if parents is None:
self.setdefault("parents", set())
else:
self.setdefault("parents", parents)
def addRun(self, run):
"""
_addRun_
run should be an instance of WMCore.DataStructs.Run
Add a run container to this file, tweak the run and lumi
keys to be max run and max lumi for backwards compat.
"""
if not isinstance(run, Run):
msg = "addRun argument must be of type WMCore.DataStructs.Run"
raise RuntimeError(msg)
addFlag = False
for runMember in self['runs']:
if runMember.run == run.run:
                # this relies on the Run object overriding __add__ to update itself in place
runMember + run
addFlag = True
if not addFlag:
self['runs'].add(run)
return
def load(self):
"""
A DataStructs file has nothing to load from, other implementations will
over-ride this method.
"""
if self['id']:
self['lfn'] = '/store/testing/%s' % self['id']
def save(self):
"""
A DataStructs file has nothing to save to, other implementations will
over-ride this method.
"""
pass
def setLocation(self, pnn):
# Make sure we don't add None, [], "" as file location
if pnn:
self['locations'] = self['locations'] | set(self.makelist(pnn))
def __eq__(self, rhs):
"""
File is equal if it has the same name
"""
eq = False
if isinstance(rhs, type(self)):
eq = self['lfn'] == rhs['lfn']
elif isinstance(rhs, (str, bytes)):
eq = self['lfn'] == rhs
return eq
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
thisHash = self['lfn'].__hash__()
return thisHash
def __lt__(self, rhs):
"""
Sort files based on lexicographical ordering of the value connected
to the 'lfn' key
"""
eq = False
if isinstance(rhs, type(self)):
eq = self['lfn'] < rhs['lfn']
elif isinstance(rhs, (str, bytes)):
eq = self['lfn'] < rhs
return eq
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
def json(self, thunker=None):
"""
_json_
Serialize the file object. This will convert all Sets() to lists and
weed out the internal data structures that don't need to be shared.
"""
fileDict = {"last_event": self["last_event"],
"first_event": self["first_event"],
"lfn": self["lfn"],
"locations": list(self["locations"]),
"id": self.get("id", None),
"checksums": self["checksums"],
"events": self["events"],
"merged": self["merged"],
"size": self["size"],
"runs": [],
"parents": []}
for parent in self["parents"]:
if isinstance(parent, (str, bytes)):
# Then for some reason, we're passing strings
# Done specifically for ErrorHandler
fileDict['parents'].append(parent)
elif thunker is None:
continue
else:
fileDict["parents"].append(thunker._thunk(parent))
for run in self["runs"]:
runDict = {"run_number": run.run,
"lumis": run.lumis}
fileDict["runs"].append(runDict)
return fileDict
def __to_json__(self, thunker=None):
"""
__to_json__
This is the standard way we jsonize other objects.
Included here so we have a uniform method.
"""
        return self.json(thunker)
# End of src/python/WMCore/DataStructs/File.py
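# --------------------------------------------------------------------------
# Illustrative usage sketch for File (not part of the original module).
# It assumes WMCore.DataStructs is importable; the LFN, site name and
# run/lumi numbers are invented.
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.File import File

def fileExample():
    wmFile = File(lfn="/store/data/example.root", size=1000, events=50)
    wmFile.addRun(Run(200000, 1, 2, 3))
    wmFile.setLocation("T2_CH_CERN")
    wmFile.setLocation(None)                       # empty locations are silently ignored
    print(wmFile["locations"])                     # {'T2_CH_CERN'}
    print(wmFile == "/store/data/example.root")    # True, equality is based on the LFN
    print(wmFile.json()["runs"])                   # [{'run_number': 200000, 'lumis': [1, 2, 3]}]

if __name__ == "__main__":
    fileExample()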
from builtins import str
from WMCore.DataStructs.WMObject import WMObject
class SummaryHistogram(WMObject):
"""
_SummaryHistogram_
Histogram object, provides familiar CRUD methods
which take care of most of the statistical
calculations when adding points, this object
can also be converted into a dictionary
for JSON documents. It knows how to combine
with other histograms and create itself from
a dictionary provided it has matching structure.
This is an interface, the real work is done
by the ContinuousSummaryHistogram and
DiscreteSummaryHistogram objects
"""
def __init__(self, title = None, xLabel = None):
"""
__init__
Initialize the elements in the object.
"""
# Meta-information about the histogram, it can be changed at any point
self.title = title
self.xLabel = xLabel
# These shouldn't be touched from anything outside the SummaryHistogram object and children classes
self.continuous = None
self.jsonInternal = None
self.data = {}
self.average = None
self.stdDev = None
return
def setTitle(self, newTitle):
"""
_setTitle_
Set the title
"""
self.title = newTitle
return
def setHorizontalLabel(self, xLabel):
"""
_setHorizontalLabel_
Set the label on the x axis
"""
self.xLabel = xLabel
return
def addPoint(self, xValue, yLabel):
"""
_addPoint_
Add a point to the histogram data, a histogram
can have many types of y values for the same x if
x is continuous otherwise it is only one yLabel.
They should be in a similar scale for best results.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def toJSON(self):
"""
_toJSON_
Return a dictionary which is compatible
with a JSON object
"""
if self.continuous is None:
raise TypeError("toJSON can't be called on a bare SummaryHistogram object")
# Get what the children classes did
jsonDict = {}
jsonDict['internalData'] = self.jsonInternal or {}
# Add the common things
jsonDict['title'] = self.title
jsonDict['xLabel'] = self.xLabel
jsonDict['continuous'] = self.continuous
jsonDict['data'] = self.data
jsonDict['stdDev'] = self.stdDev
jsonDict['average'] = self.average
return jsonDict
def __add__(self, other):
"""
__add__
Add two histograms, combine statistics.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def __str__(self):
"""
__str__
Return the str object of the JSON
"""
        return str(self.toJSON())
# End of src/python/WMCore/DataStructs/MathStructs/SummaryHistogram.py
from __future__ import division
import math
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import validateNumericInput
from WMCore.Algorithms.MathAlgos import calculateRunningAverageAndQValue, calculateStdDevFromQ
class ContinuousSummaryHistogram(SummaryHistogram):
"""
_ContinuousSummaryHistogram_
A histogram where there are continuous points
with certain frequency, it follows
that there is only one value in Y and
that the average and standard deviation are
not calculated on the frequency values but the X values.
"""
def __init__(self, title, xLabel, yLabel = None,
roundingDecimals = 2, nBins = None,
dropOutliers = False, sigmaLimit = 5,
storeHistogram = True):
"""
__init__
Initialize a more complex histogram structure, containing different
data to calculate online average and standard deviations. This data is also
stored in the JSON to allow rebuilding and adding histograms.
All histograms are binned when requested, the resolution can be specified
through nBins, otherwise the value used is the one recommended in:
Wand, M.P. (1997), "Data-Based Choice of Histogram Bin Width," The American Statistician, 51, 59-64.
        If dropOutliers is set, outliers farther than sigmaLimit standard deviations from the
mean will not be included in the binned histogram.
"""
# Initialize the parent object
SummaryHistogram.__init__(self, title, xLabel)
        # Indicate this is a continuous histogram
self.continuous = True
# Add data only used in the continuous version
self.yLabel = yLabel
self.nPoints = 0
self.QValue = None
self.average = None
# Configuration parameters for the continuous histograms
self.roundingDecimals = roundingDecimals
self.fixedNBins = nBins
self.dropOutliers = dropOutliers
self.sigmaLimit = sigmaLimit
self.binned = False
self.storeHistogram = storeHistogram
# Override initialization of some attributes
self.average = 0.0
self.stdDev = 0.0
return
def addPoint(self, xValue, yLabel = None):
"""
_addPoint_
        Add a point from a continuous set (only numbers are currently allowed) to the histogram data,
calculate the running average and standard deviation.
If no y-label had been specified before, one must be supplied
otherwise the given y-label must be either None or equal
to the stored value.
"""
if self.binned:
# Points can't be added to binned histograms!
raise Exception("Points can't be added to binned histograms")
if self.yLabel is None and yLabel is None:
raise Exception("Some y-label must be stored for the histogram")
elif self.yLabel is None:
self.yLabel = yLabel
elif yLabel is not None and self.yLabel != yLabel:
raise Exception("Only one y-label is allowed on continuous histograms")
if not validateNumericInput(xValue):
# Do nothing if it is not a number
return
xValue = float(xValue)
xValue = round(xValue, self.roundingDecimals)
if self.storeHistogram:
if xValue not in self.data:
self.data[xValue] = 0
self.data[xValue] += 1
self.nPoints += 1
(self.average, self.QValue) = calculateRunningAverageAndQValue(xValue, self.nPoints, self.average, self.QValue)
return
def __add__(self, other):
#TODO: For HG1302, support multiple agents properly in the workload summary
raise NotImplementedError
def toJSON(self):
"""
_toJSON_
Bin the histogram if any, calculate the standard deviation. Store
the internal data needed for reconstruction of the histogram
from JSON and call superclass toJSON method.
"""
if self.nPoints:
self.stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
if not self.binned and self.storeHistogram:
self.binHistogram()
self.jsonInternal = {}
self.jsonInternal['yLabel'] = self.yLabel
self.jsonInternal['QValue'] = self.QValue
self.jsonInternal['nPoints'] = self.nPoints
return SummaryHistogram.toJSON(self)
def binHistogram(self):
"""
_binHistogram_
Histograms of continuous data must be binned,
this takes care of that using given or optimal parameters.
Note that this modifies the data object,
and points can't be added to the histogram after this.
"""
if not self.nPoints:
return
self.binned = True
# Number of bins can be specified or calculated based on number of points
nBins = self.fixedNBins
if nBins is None:
nBins = int(math.floor((5.0 / 3.0) * math.pow(self.nPoints, 1.0 / 3.0)))
# Define min and max
if not self.dropOutliers:
upperLimit = max(self.data.keys())
lowerLimit = min(self.data.keys())
else:
stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
upperLimit = self.average + (stdDev * self.sigmaLimit)
lowerLimit = self.average - (stdDev * self.sigmaLimit)
# Incremental delta
delta = abs(float(upperLimit - lowerLimit)) / nBins
# Build the bins, it's a list of tuples for now
bins = []
a = lowerLimit
b = lowerLimit + delta
while len(bins) < nBins:
bins.append((a, b))
a += delta
b += delta
# Go through data and populate the binned histogram
binnedHisto = {}
currentBin = 0
currentPoint = 0
sortedData = sorted(self.data.keys())
while currentPoint < len(sortedData):
point = sortedData[currentPoint]
encodedTuple = "%s,%s" % (bins[currentBin][0], bins[currentBin][1])
if encodedTuple not in binnedHisto:
binnedHisto[encodedTuple] = 0
if point > upperLimit or point < lowerLimit:
currentPoint += 1
elif currentBin == len(bins) - 1:
binnedHisto[encodedTuple] += self.data[point]
currentPoint += 1
elif point >= bins[currentBin][0] and point < bins[currentBin][1]:
binnedHisto[encodedTuple] += self.data[point]
currentPoint += 1
else:
currentBin += 1
self.data = binnedHisto
        return
# End of src/python/WMCore/DataStructs/MathStructs/ContinuousSummaryHistogram.py
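# --------------------------------------------------------------------------
# Illustrative usage sketch for ContinuousSummaryHistogram (not part of the
# original module). It assumes WMCore is importable; the title, labels and
# randomly generated data points are invented.
import random
from WMCore.DataStructs.MathStructs.ContinuousSummaryHistogram import ContinuousSummaryHistogram

def continuousHistoExample():
    histo = ContinuousSummaryHistogram("Job wallclock time", "Hours", nBins=5)
    for _ in range(100):
        histo.addPoint(random.gauss(8.0, 1.5), "Jobs")
    summary = histo.toJSON()                       # bins the data and computes the std deviation
    print(summary["average"], summary["stdDev"])   # roughly 8.0 and 1.5
    print(list(summary["data"].keys()))            # bin edges encoded as "low,high" strings

if __name__ == "__main__":
    continuousHistoExample()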
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import getAverageStdDev
class DiscreteSummaryHistogram(SummaryHistogram):
"""
_DiscreteSummaryHistogram_
A histogram where the data is organized by
a finite number of categories, it can have
many values for each category.
"""
def __init__(self, title, xLabel):
"""
__init__
Initialize a simpler histogram that only stores
the histogram. Everything else is calculated when the JSON is requested.
"""
# Initialize the parent object
SummaryHistogram.__init__(self, title, xLabel)
# Indicate this is a discrete histogram
self.continuous = False
# Add data only used in the discrete version
self.yLabels = set()
# Override initialization of some attributes
self.average = {}
self.stdDev = {}
return
def addPoint(self, xValue, yLabel):
"""
_addPoint_
Add point to discrete histogram,
        The x value is a category and therefore not rounded.
        There can be many yLabels, and standard deviations are
        not calculated online. Histograms are always stored.
"""
if xValue not in self.data:
# Record the category
self.data[xValue] = {}
for label in self.yLabels:
self.data[xValue][label] = 0
if yLabel not in self.yLabels:
# Record the label
self.yLabels.add(yLabel)
self.average[yLabel] = 0.0
self.stdDev[yLabel] = 0.0
for category in self.data:
self.data[category][yLabel] = 0
self.data[xValue][yLabel] += 1
return
def __add__(self, other):
#TODO: For HG1302, support multiple agents properly in the workload summary
raise NotImplementedError
def toJSON(self):
"""
_toJSON_
Calculate average and standard deviation, store it
and call the parent class toJSON method
"""
for yLabel in self.yLabels:
numList = []
for xValue in self.data:
numList.append(self.data[xValue][yLabel])
(self.average[yLabel], self.stdDev[yLabel]) = getAverageStdDev(numList)
        return SummaryHistogram.toJSON(self)
# End of src/python/WMCore/DataStructs/MathStructs/DiscreteSummaryHistogram.py
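# --------------------------------------------------------------------------
# Illustrative usage sketch for DiscreteSummaryHistogram (not part of the
# original module). It assumes WMCore is importable; the site names and
# success/failure counts are invented.
from WMCore.DataStructs.MathStructs.DiscreteSummaryHistogram import DiscreteSummaryHistogram

def discreteHistoExample():
    histo = DiscreteSummaryHistogram("Jobs per site", "Site")
    for site, nSuccess, nFailure in [("T1_US_FNAL", 3, 1), ("T2_DE_DESY", 2, 0)]:
        for _ in range(nSuccess):
            histo.addPoint(site, "success")
        for _ in range(nFailure):
            histo.addPoint(site, "failure")
    summary = histo.toJSON()
    print(summary["data"])      # per-site counters, e.g. {'T1_US_FNAL': {'success': 3, 'failure': 1}, ...}
    print(summary["average"])   # per-label averages across the categories

if __name__ == "__main__":
    discreteHistoExample()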
import logging
import sys
from collections import Counter
from WMCore.Services.DBS.DBS3Reader import DBS3Reader
from WMCore.Services.Rucio.Rucio import Rucio
RUCIO_ACCT = "wma_prod"
RUCIO_HOST = "http://cms-rucio.cern.ch"
RUCIO_AUTH = "https://cms-rucio-auth.cern.ch"
DBS_URL = "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
def loggerSetup(logLevel=logging.INFO):
"""
Return a logger which writes everything to stdout.
"""
logger = logging.getLogger(__name__)
outHandler = logging.StreamHandler(sys.stdout)
outHandler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(module)s: %(message)s"))
outHandler.setLevel(logLevel)
logger.addHandler(outHandler)
logger.setLevel(logLevel)
return logger
def getFromRucio(dataset, logger):
"""
    Uses the WMCore Rucio object to fetch all the blocks and files
    for a given container.
    Returns a dictionary keyed by the block name, where the value is the number of files.
"""
rucio = Rucio(acct=RUCIO_ACCT,
hostUrl=RUCIO_HOST,
authUrl=RUCIO_AUTH,
configDict={'logger': logger})
result = dict()
for block in rucio.getBlocksInContainer(dataset):
data = rucio.getDID(block)
result.setdefault(block, data['length'])
return result
def getFromDBS(dataset, logger):
"""
Uses the WMCore DBS3Reader object to fetch all the blocks and files
for a given container.
    Returns a dictionary keyed by the block name, and an inner dictionary
with the number of valid and invalid files. It also returns a total counter
for the number of valid and invalid files in the dataset.
"""
dbsReader = DBS3Reader(DBS_URL, logger)
result = dict()
dbsFilesCounter = Counter({'valid': 0, 'invalid': 0})
blocks = dbsReader.listFileBlocks(dataset)
for block in blocks:
data = dbsReader.dbs.listFileArray(block_name=block, validFileOnly=0, detail=True)
result.setdefault(block, Counter({'valid': 0, 'invalid': 0}))
for fileInfo in data:
if fileInfo['is_file_valid'] == 1:
result[block]['valid'] += 1
dbsFilesCounter['valid'] += 1
else:
result[block]['invalid'] += 1
dbsFilesCounter['invalid'] += 1
return result, dbsFilesCounter
def main():
"""
Expects a dataset name as input argument.
    It then queries Rucio and DBS and compares their blocks and
number of files.
"""
if len(sys.argv) != 2:
print("A dataset name must be provided in the command line")
sys.exit(1)
datasetName = sys.argv[1]
logger = loggerSetup(logging.INFO)
rucioOutput = getFromRucio(datasetName, logger)
dbsOutput, dbsFilesCounter = getFromDBS(datasetName, logger)
logger.info("*** Dataset: %s", datasetName)
logger.info("Rucio file count : %s", sum(rucioOutput.values()))
logger.info("DBS file count : %s", dbsFilesCounter['valid'] + dbsFilesCounter['invalid'])
logger.info(" - valid files : %s", dbsFilesCounter['valid'])
logger.info(" - invalid files : %s", dbsFilesCounter['invalid'])
logger.info("Blocks in Rucio but not in DBS: %s", set(rucioOutput.keys()) - set(dbsOutput.keys()))
logger.info("Blocks in DBS but not in Rucio: %s", set(dbsOutput.keys()) - set(rucioOutput.keys()))
for blockname in rucioOutput:
if blockname not in dbsOutput:
logger.error("This block does not exist in DBS: %s", blockname)
continue
if rucioOutput[blockname] != sum(dbsOutput[blockname].values()):
logger.warning("Block with file mismatch: %s", blockname)
logger.warning("\tRucio: %s\t\tDBS: %s", rucioOutput[blockname], sum(dbsOutput[blockname].values()))
if __name__ == "__main__":
    sys.exit(main())
# End of bin/adhoc-scripts/checkDsetFileCount.py
from textwrap import TextWrapper
from collections import OrderedDict
def twClosure(replace_whitespace=False,
break_long_words=False,
maxWidth=120,
maxLength=-1,
maxDepth=-1,
initial_indent=''):
"""
Deals with indentation of dictionaries with very long key, value pairs.
replace_whitespace: Replace each whitespace character with a single space.
    break_long_words: If True, words longer than maxWidth will be broken.
    maxWidth: The maximum length of wrapped lines.
    maxLength: The maximum number of items shown per container (-1 means no limit).
    maxDepth: The maximum nesting depth shown (-1 means no limit).
    initial_indent: String that will be prepended to the first line of the output
    Wraps all strings for both keys and values to maxWidth (120 by default) chars.
Uses 4 spaces indentation for both keys and values.
Nested dictionaries and lists go to next line.
"""
twr = TextWrapper(replace_whitespace=replace_whitespace,
break_long_words=break_long_words,
width=maxWidth,
initial_indent=initial_indent)
def twEnclosed(obj, ind='', depthReached=0, reCall=False):
"""
The inner function of the closure
ind: Initial indentation for the single output string
reCall: Flag to indicate a recursive call (should not be used outside)
"""
output = ''
if isinstance(obj, dict):
obj = OrderedDict(sorted(list(obj.items()),
key=lambda t: t[0],
reverse=False))
if reCall:
output += '\n'
ind += ' '
depthReached += 1
lengthReached = 0
for key, value in list(obj.items()):
lengthReached += 1
if lengthReached > maxLength and maxLength >= 0:
output += "%s...\n" % ind
break
if depthReached <= maxDepth or maxDepth < 0:
output += "%s%s: %s" % (ind,
''.join(twr.wrap(key)),
twEnclosed(value, ind, depthReached=depthReached, reCall=True))
elif isinstance(obj, (list, set)):
if reCall:
output += '\n'
ind += ' '
lengthReached = 0
for value in obj:
lengthReached += 1
if lengthReached > maxLength and maxLength >= 0:
output += "%s...\n" % ind
break
if depthReached <= maxDepth or maxDepth < 0:
output += "%s%s" % (ind, twEnclosed(value, ind, depthReached=depthReached, reCall=True))
else:
output += "%s\n" % str(obj) # join(twr.wrap(str(obj)))
return output
return twEnclosed
def twPrint(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
"""
A simple caller of twClosure (see docstring for twClosure)
"""
twPrinter = twClosure(maxWidth=maxWidth,
maxLength=maxLength,
maxDepth=maxDepth)
print(twPrinter(obj))
def twFormat(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
"""
A simple caller of twClosure (see docstring for twClosure)
"""
twFormatter = twClosure(maxWidth=maxWidth,
maxLength=maxLength,
maxDepth=maxDepth)
    return twFormatter(obj)
# End of src/python/Utils/TwPrint.py
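# --------------------------------------------------------------------------
# Illustrative usage sketch for twFormat (not part of the original module).
# It assumes this module is importable as Utils.TwPrint; the nested request
# document below is invented.
from Utils.TwPrint import twFormat

def twFormatExample():
    doc = {"RequestName": "test_workflow",
           "Campaigns": ["RunIISummer20", "RunIIFall18"],
           "Params": {"Memory": 4000, "Multicore": 8, "TimePerEvent": 10}}
    # Print at most 2 items per container and 2 levels of nesting
    print(twFormat(doc, maxLength=2, maxDepth=2))

if __name__ == "__main__":
    twFormatExample()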
import io
import os
import stat
import subprocess
import time
import zlib
from Utils.Utilities import decodeBytesToUnicode
def calculateChecksums(filename):
"""
_calculateChecksums_
Get the adler32 and crc32 checksums of a file. Return None on error
Process line by line and adjust for known signed vs. unsigned issues
http://docs.python.org/library/zlib.html
The cksum UNIX command line tool implements a CRC32 checksum that is
different than any of the python algorithms, therefore open cksum
in a subprocess and feed it the same chunks of data that are used
to calculate the adler32 checksum.
"""
adler32Checksum = 1 # adler32 of an empty string
cksumProcess = subprocess.Popen("cksum", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# the lambda basically creates an iterator function with zero
# arguments that steps through the file in 4096 byte chunks
with open(filename, 'rb') as f:
for chunk in iter((lambda: f.read(4096)), b''):
adler32Checksum = zlib.adler32(chunk, adler32Checksum)
cksumProcess.stdin.write(chunk)
cksumProcess.stdin.close()
cksumProcess.wait()
cksumStdout = cksumProcess.stdout.read().split()
cksumProcess.stdout.close()
# consistency check on the cksum output
filesize = os.stat(filename)[stat.ST_SIZE]
if len(cksumStdout) != 2 or int(cksumStdout[1]) != filesize:
raise RuntimeError("Something went wrong with the cksum calculation !")
cksumStdout[0] = decodeBytesToUnicode(cksumStdout[0])
return (format(adler32Checksum & 0xffffffff, '08x'), cksumStdout[0])
def tail(filename, nLines=20):
"""
_tail_
A version of tail
Adapted from code on http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
"""
assert nLines >= 0
pos, lines = nLines + 1, []
# make sure only valid utf8 encoded chars will be passed along
with io.open(filename, 'r', encoding='utf8', errors='ignore') as f:
while len(lines) <= nLines:
try:
f.seek(-pos, 2)
except IOError:
f.seek(0)
break
finally:
lines = list(f)
pos *= 2
text = "".join(lines[-nLines:])
return text
def getFileInfo(filename):
"""
_getFileInfo_
Return file info in a friendly format
"""
filestats = os.stat(filename)
fileInfo = {'Name': filename,
'Size': filestats[stat.ST_SIZE],
'LastModification': time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(filestats[stat.ST_MTIME])),
'LastAccess': time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(filestats[stat.ST_ATIME]))}
return fileInfo
def findMagicStr(filename, matchString):
"""
_findMagicStr_
Parse a log file looking for a pattern string
"""
with io.open(filename, 'r', encoding='utf8', errors='ignore') as logfile:
# TODO: can we avoid reading the whole file
for line in logfile:
if matchString in line:
yield line
def getFullPath(name, envPath="PATH"):
"""
:param name: file name
:param envPath: any environment variable specified for path (PATH, PYTHONPATH, etc)
:return: full path if it is under PATH env
"""
for path in os.getenv(envPath).split(os.path.pathsep):
fullPath = os.path.join(path, name)
if os.path.exists(fullPath):
return fullPath
    return None
# End of src/python/Utils/FileTools.py
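# --------------------------------------------------------------------------
# Illustrative usage sketch for the file helpers above (not part of the
# original module). It assumes Utils.FileTools is importable; a small
# temporary file is written just for demonstration.
import os
import tempfile
from Utils.FileTools import tail, getFileInfo, findMagicStr

def fileToolsExample():
    with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as fobj:
        fobj.write("\n".join("line %d" % i for i in range(100)))
        logFile = fobj.name
    try:
        print(tail(logFile, nLines=3))                 # last three lines of the file
        print(getFileInfo(logFile)["Size"])            # file size in bytes
        print(list(findMagicStr(logFile, "line 42")))  # lines containing the pattern
    finally:
        os.remove(logFile)

if __name__ == "__main__":
    fileToolsExample()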
from builtins import str, bytes
def portForward(port):
"""
Decorator wrapper function for port forwarding of the REST calls of any
function to a given port.
Currently there are three constraints for applying this decorator.
1. The function to be decorated must be defined within a class and not being a static method.
The reason for that is because we need to be sure the function's signature will
always include the class instance as its first argument.
2. The url argument must be present as the second one in the positional argument list
of the decorated function (right after the class instance argument).
3. The url must follow the syntax specifications in RFC 1808:
https://tools.ietf.org/html/rfc1808.html
If all of the above constraints are fulfilled and the url is part of the
urlMangleList, then the url is parsed and the port is substituted with the
one provided as an argument to the decorator's wrapper function.
    :param port: The port to which the REST call should be forwarded.
"""
def portForwardDecorator(callFunc):
"""
The actual decorator
"""
def portMangle(callObj, url, *args, **kwargs):
"""
Function used to check if the url coming with the current argument list
is to be forwarded and if so change the port to the one provided as an
argument to the decorator wrapper.
            :param callObj: This is the class instance (self from within the class)
                            which is always present in the signature of a
                            public method. We never use this argument, but
                            we need it there so the positional argument
                            order is not broken
:param url: This is the actual url to be (eventually) forwarded
:param *args: The positional argument list coming from the original function
            :param *kwargs: The keyword argument list coming from the original function
"""
forwarded = False
try:
if isinstance(url, str):
urlToMangle = 'https://cmsweb'
if url.startswith(urlToMangle):
newUrl = url.replace('.cern.ch/', '.cern.ch:%d/' % port, 1)
forwarded = True
elif isinstance(url, bytes):
urlToMangle = b'https://cmsweb'
if url.startswith(urlToMangle):
newUrl = url.replace(b'.cern.ch/', b'.cern.ch:%d/' % port, 1)
forwarded = True
except Exception:
pass
if forwarded:
return callFunc(callObj, newUrl, *args, **kwargs)
else:
return callFunc(callObj, url, *args, **kwargs)
return portMangle
return portForwardDecorator
class PortForward():
"""
A class with a call method implementing a simple way to use the functionality
provided by the protForward decorator as a pure functional call:
EXAMPLE:
from Utils.PortForward import PortForward
portForwarder = PortForward(8443)
url = 'https://cmsweb-testbed.cern.ch/couchdb'
url = portForwarder(url)
"""
def __init__(self, port):
"""
        The init method for the PortForward call class. It simply stores
        the port to which matching cmsweb URLs should be forwarded.
"""
self.port = port
def __call__(self, url):
"""
The call method for the PortForward class
"""
def dummyCall(self, url):
return url
        return portForward(self.port)(dummyCall)(self, url)
# End of src/python/Utils/PortForward.py
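# --------------------------------------------------------------------------
# Illustrative usage sketch for PortForward and the portForward decorator
# (not part of the original module). It assumes Utils.PortForward is
# importable; the URLs are examples only and no HTTP request is made.
from Utils.PortForward import PortForward, portForward

def portForwardExample():
    forwarder = PortForward(8443)
    print(forwarder('https://cmsweb-testbed.cern.ch/couchdb'))
    # -> 'https://cmsweb-testbed.cern.ch:8443/couchdb'
    print(forwarder('https://example.org/data'))   # untouched, not a cmsweb URL

class DummyClient(object):
    """Minimal client showing the decorator on a method whose second argument is the url."""
    @portForward(8443)
    def getData(self, url):
        return url

if __name__ == "__main__":
    portForwardExample()
    print(DummyClient().getData('https://cmsweb.cern.ch/reqmgr2'))
    # -> 'https://cmsweb.cern.ch:8443/reqmgr2'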
from builtins import object
from functools import reduce
class Functor(object):
"""
A simple functor class used to construct a function call which later to be
applied on an (any type) object.
NOTE:
It expects a function in the constructor and an (any type) object
passed to the run or __call__ methods, which methods once called they
construct and return the following function:
func(obj, *args, **kwargs)
NOTE:
All the additional arguments which the function may take must be set in
the __init__ method. If any of them are passed during run time an error
will be raised.
:func:
The function to which the rest of the constructor arguments are about
to be attached and then the newly created function will be returned.
- The function needs to take at least one parameter since the object
passed to the run/__call__ methods will always be put as a first
argument to the function.
:Example:
def adder(a, b, *args, **kwargs):
if args:
print("adder args: %s" % args)
if kwargs:
print("adder kwargs: %s" % kwargs)
            res = a + b
            print("adder res: %s" % res)
            return res
>>> x=Functor(adder, 8, 'foo', bar=True)
>>> x(2)
adder args: foo
adder kwargs: {'bar': True}
adder res: 10
10
>>> x
<Pipeline.Functor instance at 0x7f319bbaeea8>
"""
def __init__(self, func, *args, **kwargs):
"""
The init method for class Functor
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, obj):
"""
The call method for class Functor
"""
return self.run(obj)
def run(self, obj):
return self.func(obj, *self.args, **self.kwargs)
class Pipeline(object):
"""
A simple Functional Pipeline Class: applies a set of functions to an object,
where the output of every previous function is an input to the next one.
"""
# NOTE:
# Similar and inspiring approaches but yet some different implementations
# are discussed in the following two links [1] & [2]. With a quite good
# explanation in [1], which helped a lot. All in all at the bottom always
# sits the reduce function.
# [1]
# https://softwarejourneyman.com/python-function-pipelines.html
# [2]
# https://gitlab.com/mc706/functional-pipeline
def __init__(self, funcLine=None, name=None):
"""
:funcLine: A list of functions or Functors of function + arguments (see
the Class definition above) that are to be applied sequentially
to the object.
- If any of the elements of 'funcLine' is a function, a direct
function call with the object as an argument is performed.
- If any of the elements of 'funcLine' is a Functor, then the
first argument of the Functor constructor is the function to
be evaluated and the object is passed as a first argument to
the function with all the rest of the arguments passed right
after it eg. the following Functor in the funcLine:
Functor(func, 'foo', bar=True)
will result in the following function call later when the
pipeline is executed:
func(obj, 'foo', bar=True)
:Example:
(using the adder function from above and an object of type int)
>>> pipe = Pipeline([Functor(adder, 5),
Functor(adder, 6),
Functor(adder, 7, "extraArg"),
Functor(adder, 8, update=True)])
>>> pipe.run(1)
adder res: 6
adder res: 12
adder args: extraArg
adder res: 19
adder kwargs: {'update': True}
adder res: 27
"""
self.funcLine = funcLine or []
self.name = name
def getPipelineName(self):
"""
__getPipelineName__
"""
name = self.name or "Unnamed Pipeline"
return name
def run(self, obj):
        return reduce(lambda obj, functor: functor(obj), self.funcLine, obj)
# End of src/python/Utils/Pipeline.py
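# --------------------------------------------------------------------------
# Illustrative runnable sketch combining Functor and Pipeline (not part of
# the original module). It assumes Utils.Pipeline is importable; the string
# processing steps below are invented.
from Utils.Pipeline import Functor, Pipeline

def tag(text, label):
    """Prepend a label to the text, e.g. tag('abc', 'step1') -> 'step1:abc'."""
    return "%s:%s" % (label, text)

pipe = Pipeline(funcLine=[str.strip,                 # plain function: called as func(obj)
                          Functor(tag, "cleaned"),   # Functor: called as tag(obj, "cleaned")
                          str.upper],
                name="demoPipeline")

if __name__ == "__main__":
    print(pipe.getPipelineName())   # demoPipeline
    print(pipe.run("  hello "))     # CLEANED:HELLO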
# system modules
import os
import ssl
import time
import logging
import traceback
# third part library
try:
import jwt
except ImportError:
traceback.print_exc()
jwt = None
from Utils.Utilities import encodeUnicodeToBytes
# prevent "SSL: CERTIFICATE_VERIFY_FAILED" error
# this will cause pylint warning W0212, therefore we ignore it above
ssl._create_default_https_context = ssl._create_unverified_context
def readToken(name=None):
"""
Read IAM token either from environment or file name
:param name: ether file name containing token or environment name which hold the token value.
If not provided it will be assumed to read token from IAM_TOKEN environment.
:return: token or None
"""
if name and os.path.exists(name):
token = None
with open(name, 'r', encoding='utf-8') as istream:
token = istream.read()
return token
if name:
return os.environ.get(name)
return os.environ.get("IAM_TOKEN")
def tokenData(token, url="https://cms-auth.web.cern.ch/jwk", audUrl="https://wlcg.cern.ch/jwt/v1/any"):
"""
inspect and extract token data
:param token: token string
:param url: IAM provider URL
:param audUrl: audience string
"""
if not token or not jwt:
return {}
if isinstance(token, str):
token = encodeUnicodeToBytes(token)
jwksClient = jwt.PyJWKClient(url)
signingKey = jwksClient.get_signing_key_from_jwt(token)
key = signingKey.key
headers = jwt.get_unverified_header(token)
alg = headers.get('alg', 'RS256')
data = jwt.decode(
token,
key,
algorithms=[alg],
audience=audUrl,
options={"verify_exp": True},
)
return data
def isValidToken(token):
"""
check if given token is valid or not
:param token: token string
:return: true or false
"""
tokenDict = {}
tokenDict = tokenData(token)
exp = tokenDict.get('exp', 0) # expire, seconds since epoch
if not exp or exp < time.time():
return False
return True
class TokenManager():
"""
TokenManager class handles IAM tokens
"""
def __init__(self,
name=None,
url="https://cms-auth.web.cern.ch/jwk",
audUrl="https://wlcg.cern.ch/jwt/v1/any",
logger=None):
"""
Token manager reads IAM tokens either from file or env.
It caches token along with expiration timestamp.
By default the env variable to use is IAM_TOKEN.
:param name: string representing either file or env where we should read token from
:param url: IAM provider URL
:param audUrl: audience string
:param logger: logger object or none to use default one
"""
self.name = name
self.url = url
self.audUrl = audUrl
self.expire = 0
self.token = None
self.logger = logger if logger else logging.getLogger()
try:
self.token = self.getToken()
except Exception as exc:
self.logger.exception("Failed to get token. Details: %s", str(exc))
def getToken(self):
"""
Return valid token and sets its expire timestamp
"""
if not self.token or not isValidToken(self.token):
self.token = readToken(self.name)
tokenDict = {}
try:
tokenDict = tokenData(self.token, url=self.url, audUrl=self.audUrl)
self.logger.debug(tokenDict)
except Exception as exc:
self.logger.exception(str(exc))
raise
self.expire = tokenDict.get('exp', 0)
return self.token
def getLifetime(self):
"""
        Return the remaining lifetime of the existing token
"""
        return self.expire - int(time.time())
# End of src/python/Utils/TokenManager.py
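# --------------------------------------------------------------------------
# Illustrative usage sketch for TokenManager (not part of the original
# module). It assumes Utils.TokenManager is importable, PyJWT is installed,
# and a valid IAM token is available either in the IAM_TOKEN environment
# variable or in the (hypothetical) file path given below.
from Utils.TokenManager import TokenManager, readToken, isValidToken

def tokenExample(tokenFile="/tmp/iam_token.txt"):   # hypothetical token file
    rawToken = readToken(tokenFile)
    if rawToken:
        print("Token valid:", isValidToken(rawToken))
    tm = TokenManager(name=tokenFile)
    print("Seconds until expiration:", tm.getLifetime())

if __name__ == "__main__":
    tokenExample()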
from copy import copy
from builtins import object
from time import time
class MemoryCacheException(Exception):
def __init__(self, message):
super(MemoryCacheException, self).__init__(message)
class MemoryCache():
__slots__ = ["lastUpdate", "expiration", "_cache"]
def __init__(self, expiration, initialData=None):
"""
Initializes cache object
:param expiration: expiration time in seconds
:param initialData: initial value for the cache
"""
self.lastUpdate = int(time())
self.expiration = expiration
self._cache = initialData
def __contains__(self, item):
"""
Check whether item is in the current cache
:param item: a simple object (string, integer, etc)
:return: True if the object can be found in the cache, False otherwise
"""
return item in self._cache
def __getitem__(self, keyName):
"""
If the cache is a dictionary, return that item from the cache. Else, raise an exception.
:param keyName: the key name from the dictionary
"""
if isinstance(self._cache, dict):
return copy(self._cache.get(keyName))
else:
raise MemoryCacheException("Cannot retrieve an item from a non-dict MemoryCache object: {}".format(self._cache))
def reset(self):
"""
Resets the cache to its current data type
"""
if isinstance(self._cache, (dict, set)):
self._cache.clear()
elif isinstance(self._cache, list):
del self._cache[:]
else:
raise MemoryCacheException("The cache needs to be reset manually, data type unknown")
def isCacheExpired(self):
"""
Evaluate whether the cache has already expired, returning
True if it did, otherwise it returns False
"""
return self.lastUpdate + self.expiration < int(time())
def getCache(self):
"""
Raises an exception if the cache has expired, otherwise returns
its data
"""
if self.isCacheExpired():
expiredSince = int(time()) - (self.lastUpdate + self.expiration)
raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
return self._cache
def setCache(self, inputData):
"""
Refresh the cache with the content provided (refresh its expiration as well)
This method enforces the user to not change the cache data type
:param inputData: data to store in the cache
"""
if not isinstance(self._cache, type(inputData)):
raise TypeError("Current cache data type: %s, while new value is: %s" %
(type(self._cache), type(inputData)))
self.reset()
self.lastUpdate = int(time())
self._cache = inputData
def addItemToCache(self, inputItem):
"""
Adds new item(s) to the cache, without resetting its expiration.
It, of course, only works for data caches of type: list, set or dict.
:param inputItem: additional item to be added to the current cached data
"""
if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
# extend another list or set into a set
self._cache.update(inputItem)
elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
# add a simple object (integer, string, etc) to a set
self._cache.add(inputItem)
elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
# extend another list or set into a list
self._cache.extend(inputItem)
elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
# add a simple object (integer, string, etc) to a list
self._cache.append(inputItem)
elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
self._cache.update(inputItem)
else:
msg = "Input item type: %s cannot be added to a cache type: %s" % (type(self._cache), type(inputItem))
raise TypeError("Cache and input item data type mismatch. %s" % msg) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/Utils/MemoryCache.py | 0.774796 | 0.226185 | MemoryCache.py | pypi |
from builtins import object
import logging
import time
import calendar
from datetime import tzinfo, timedelta
def gmtimeSeconds():
"""
Return GMT time in seconds
"""
return int(time.mktime(time.gmtime()))
def encodeTimestamp(secs):
"""
    Encode seconds since epoch into a GMT timestamp string representation
:param secs: input timestamp value (either int or float) in seconds since epoch
:return: time string in GMT timezone representation
"""
if not isinstance(secs, (int, float)):
raise Exception("Wrong input, should be seconds since epoch either int or float value")
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(int(secs)))
def decodeTimestamp(timeString):
"""
    Decode a GMT timestamp string into seconds since epoch
    :param timeString: timestamp string representation in GMT timezone, see encodeTimestamp
    :return: seconds since epoch (GMT)
"""
if not isinstance(timeString, str):
raise Exception("Wrong input, should be time string in GMT timezone representation")
return calendar.timegm(time.strptime(timeString, "%Y-%m-%dT%H:%M:%SZ"))
def timeFunction(func):
"""
source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
Decorator function to measure how long a method/function takes to run
It returns a tuple with:
* wall clock time spent
* returned result of the function
* the function name
"""
def wrapper(*arg, **kw):
t1 = time.time()
res = func(*arg, **kw)
t2 = time.time()
return round((t2 - t1), 4), res, func.__name__
return wrapper
class CodeTimer(object):
"""
A context manager for timing function calls.
Adapted from https://www.blog.pythonlibrary.org/2016/05/24/python-101-an-intro-to-benchmarking-your-code/
Use like
with CodeTimer(label='Doing something'):
do_something()
"""
def __init__(self, label='The function', logger=None):
self.start = time.time()
self.label = label
self.logger = logger or logging.getLogger()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
end = time.time()
runtime = round((end - self.start), 3)
self.logger.info(f"{self.label} took {runtime} seconds to complete")
class LocalTimezone(tzinfo):
"""
A required python 2 class to determine current timezone for formatting rfc3339 timestamps
Required for sending alerts to the MONIT AlertManager
Can be removed once WMCore starts using python3
Details of class can be found at: https://docs.python.org/2/library/datetime.html#tzinfo-objects
"""
def __init__(self):
super(LocalTimezone, self).__init__()
self.ZERO = timedelta(0)
self.STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
self.DSTOFFSET = timedelta(seconds=-time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return self.ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
        return tt.tm_isdst > 0
# End of src/python/Utils/Timers.py
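# --------------------------------------------------------------------------
# Illustrative usage sketch for the timing helpers above (not part of the
# original module). It assumes Utils.Timers is importable; the timed
# workload is just a trivial sleep.
import time
from Utils.Timers import timeFunction, CodeTimer, encodeTimestamp, decodeTimestamp

@timeFunction
def slowAdd(a, b):
    time.sleep(0.1)
    return a + b

if __name__ == "__main__":
    elapsed, result, funcName = slowAdd(1, 2)
    print(elapsed, result, funcName)       # ~0.1, 3, 'slowAdd'
    with CodeTimer(label="Sleeping a bit"):
        time.sleep(0.1)
    stamp = encodeTimestamp(0)
    print(stamp)                           # '1970-01-01T00:00:00Z'
    print(decodeTimestamp(stamp))          # 0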
import copy
import unittest
class ExtendedUnitTestCase(unittest.TestCase):
"""
Class that can be imported to switch to 'mock'ed versions of
services.
"""
def assertContentsEqual(self, expected_obj, actual_obj, msg=None):
"""
A nested object comparison without regard for the ordering of contents. It asserts that
expected_obj and actual_obj contain the same elements and that their sub-elements are the same.
However, all sequences are allowed to contain the same elements, but in different orders.
"""
def traverse_dict(dictionary):
for key, value in list(dictionary.items()):
if isinstance(value, dict):
traverse_dict(value)
elif isinstance(value, list):
traverse_list(value)
return
def get_dict_sortkey(x):
if isinstance(x, dict):
return list(x.keys())
else:
return x
def traverse_list(theList):
for value in theList:
if isinstance(value, dict):
traverse_dict(value)
elif isinstance(value, list):
traverse_list(value)
theList.sort(key=get_dict_sortkey)
return
if not isinstance(expected_obj, type(actual_obj)):
self.fail(msg="The two objects are different type and cannot be compared: %s and %s" % (
type(expected_obj), type(actual_obj)))
expected = copy.deepcopy(expected_obj)
actual = copy.deepcopy(actual_obj)
if isinstance(expected, dict):
traverse_dict(expected)
traverse_dict(actual)
elif isinstance(expected, list):
traverse_list(expected)
traverse_list(actual)
else:
self.fail(msg="The two objects are different type (%s) and cannot be compared." % type(expected_obj))
        return self.assertEqual(expected, actual)
# End of src/python/Utils/ExtendedUnitTestCase.py
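# --------------------------------------------------------------------------
# Illustrative test sketch for assertContentsEqual (not part of the original
# module). It assumes Utils.ExtendedUnitTestCase is importable; the compared
# documents are invented.
import unittest
from Utils.ExtendedUnitTestCase import ExtendedUnitTestCase

class ContentsTest(ExtendedUnitTestCase):
    def testOrderInsensitiveComparison(self):
        expected = {"sites": ["T1_US_FNAL", "T2_CH_CERN"], "priority": 100}
        actual = {"priority": 100, "sites": ["T2_CH_CERN", "T1_US_FNAL"]}
        # Same contents, different list ordering: this passes while a plain
        # assertEqual on the two dictionaries would fail
        self.assertContentsEqual(expected, actual)

if __name__ == "__main__":
    unittest.main()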
from builtins import str, bytes
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
"""
Lower CMS headers in provided header's dict. The WMCore Authentication
code check only cms headers in lower case, e.g. cms-xxx-yyy.
"""
lheaders = {}
for hkey, hval in list(headers.items()): # perform lower-case
# lower header keys since we check lower-case in headers
if hkey.startswith('Cms-') or hkey.startswith('CMS-'):
lheaders[hkey.lower()] = hval
else:
lheaders[hkey] = hval
return lheaders
def makeList(stringList):
"""
_makeList_
Make a python list out of a comma separated list of strings,
throws a ValueError if the input is not well formed.
If the stringList is already of type list, then return it untouched.
"""
if isinstance(stringList, list):
return stringList
if isinstance(stringList, str):
toks = stringList.lstrip(' [').rstrip(' ]').split(',')
if toks == ['']:
return []
return [str(tok.strip(' \'"')) for tok in toks]
raise ValueError("Can't convert to list %s" % stringList)
def makeNonEmptyList(stringList):
"""
_makeNonEmptyList_
Given a string or a list of strings, return a non empty list of strings.
Throws an exception in case the final list is empty or input data is not
a string or a python list
"""
finalList = makeList(stringList)
if not finalList:
raise ValueError("Input data cannot be an empty list %s" % stringList)
return finalList
def strToBool(string):
"""
Try to convert different variations of True or False (including a string
type object) to a boolean value.
In short:
* True gets mapped from: True, "True", "true", "TRUE".
* False gets mapped from: False, "False", "false", "FALSE"
* anything else will fail
:param string: expects a boolean or a string, but it could be anything else
:return: a boolean value, or raise an exception if value passed in is not supported
"""
if string is False or string is True:
return string
elif string in ["True", "true", "TRUE"]:
return True
elif string in ["False", "false", "FALSE"]:
return False
raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
"""
_safeStr_
Cast simple data (int, float, basestring) to string.
"""
if not isinstance(string, (tuple, list, set, dict)):
return str(string)
raise ValueError("We're not supposed to convert %s to string." % string)
def diskUse():
"""
This returns the % use of each disk partition
"""
diskPercent = []
df = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
output = df.communicate()[0]
output = decodeBytesToUnicode(output).split("\n")
for x in output:
split = x.split()
if split != [] and split[0] != 'Filesystem':
diskPercent.append({'mounted': split[5], 'percent': split[4]})
return diskPercent
def numberCouchProcess():
"""
This returns the number of couch process
"""
ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
process = ps.communicate()[0]
process = decodeBytesToUnicode(process).count('couchjs')
return process
def rootUrlJoin(base, extend):
"""
Adds a path element to the path within a ROOT url
"""
if base:
match = re.match("^root://([^/]+)/(.+)", base)
if match:
host = match.group(1)
path = match.group(2)
newpath = os.path.join(path, extend)
newurl = "root://%s/%s" % (host, newpath)
return newurl
return None
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
"""
_zipEncodeStr_
Utility to zip a string and encode it.
If zipped encoded length is greater than maxLen,
truncate message until zip/encoded version
is within the limits allowed.
"""
message = encodeUnicodeToBytes(message)
encodedStr = zlib.compress(message, compressLevel)
encodedStr = base64.b64encode(encodedStr)
if len(encodedStr) < maxLen or maxLen == -1:
return encodedStr
compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
# Estimate new length for message zip/encoded version
# to be less than maxLen.
# Also, append truncate indicator to message.
truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
strLen = int((maxLen - len(truncateIndicator)) / compressRate)
message = message[:strLen] + truncateIndicator
encodedStr = zipEncodeStr(message, maxLen=-1)
# If new length is not short enough, truncate
# recursively by steps
while len(encodedStr) > maxLen:
message = message[:-steps - len(truncateIndicator)] + truncateIndicator
encodedStr = zipEncodeStr(message, maxLen=-1)
return encodedStr
def getSize(obj):
"""
_getSize_
Function to traverse an object and calculate its total size in bytes
:param obj: a python object
:return: an integer representing the total size of the object
Code extracted from Stack Overflow:
https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
"""
# Custom objects know their class.
# Function objects seem to know way too much, including modules.
# Exclude modules as well.
BLACKLIST = type, ModuleType, FunctionType
if isinstance(obj, BLACKLIST):
raise TypeError('getSize() does not take argument of type: '+ str(type(obj)))
seen_ids = set()
size = 0
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, BLACKLIST) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
objects = get_referents(*need_referents)
return size
def decodeBytesToUnicode(value, errors="strict"):
"""
Accepts an input "value" of generic type.
If "value" is a string of type sequence of bytes (i.e. in py2 `str` or
`future.types.newbytes.newbytes`, in py3 `bytes`), then it is converted to
a sequence of unicode codepoints.
This function is useful for cleaning input data when using the
"unicode sandwich" approach, which involves converting bytes (i.e. strings
of type sequence of bytes) to unicode (i.e. strings of type sequence of
unicode codepoints, in py2 `unicode` or `future.types.newstr.newstr`,
    in py3 `str`) as soon as possible when receiving input data, and
converting unicode back to bytes as late as possible.
    Attention:
- converting unicode back to bytes is not covered by this function
- converting unicode back to bytes is not always necessary. when in doubt,
do not do it.
Reference: https://nedbatchelder.com/text/unipain.html
py2:
- "errors" can be: "strict", "ignore", "replace",
- ref: https://docs.python.org/2/howto/unicode.html#the-unicode-type
py3:
- "errors" can be: "strict", "ignore", "replace", "backslashreplace"
- ref: https://docs.python.org/3/howto/unicode.html#the-string-type
"""
if isinstance(value, bytes):
return value.decode("utf-8", errors)
return value
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
"""
if *condition*, then call decodeBytesToUnicode(*value*, *errors*),
else return *value*
This may be useful when we want to conditionally apply decodeBytesToUnicode,
maintaining brevity.
Parameters
----------
value : any
passed to decodeBytesToUnicode
errors: str
passed to decodeBytesToUnicode
    condition: boolean or object with attribute __bool__()
if True, then we run decodeBytesToUnicode. Usually PY2/PY3
"""
if condition:
return decodeBytesToUnicode(value, errors)
return value
def encodeUnicodeToBytes(value, errors="strict"):
"""
Accepts an input "value" of generic type.
If "value" is a string of type sequence of unicode (i.e. in py2 `unicode` or
`future.types.newstr.newstr`, in py3 `str`), then it is converted to
a sequence of bytes.
This function is useful for encoding output data when using the
"unicode sandwich" approach, which involves converting unicode (i.e. strings
of type sequence of unicode codepoints) to bytes (i.e. strings of type
sequence of bytes, in py2 `str` or `future.types.newbytes.newbytes`,
in py3 `bytes`) as late as possible when passing a string to a third-party
    function that only accepts bytes as input (pycurl's curl.setopt is an
example).
py2:
- "errors" can be: "strict", "ignore", "replace", "xmlcharrefreplace"
- ref: https://docs.python.org/2/howto/unicode.html#the-unicode-type
py3:
- "errors" can be: "strict", "ignore", "replace", "backslashreplace",
"xmlcharrefreplace", "namereplace"
- ref: https://docs.python.org/3/howto/unicode.html#the-string-type
"""
if isinstance(value, str):
return value.encode("utf-8", errors)
return value
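# Illustrative usage sketch (not part of the original module): the "unicode
# sandwich" round trip with the two helpers defined above.
def _exampleUnicodeSandwich():
    rawInput = b"caf\xc3\xa9"                     # bytes received from the outside
    text = decodeBytesToUnicode(rawInput)         # decode as early as possible
    assert text == u"caf\xe9"
    return encodeUnicodeToBytes(text)             # encode as late as possible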
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
"""
if *condition*, then call encodeUnicodeToBytes(*value*, *errors*),
else return *value*
This may be useful when we want to conditionally apply encodeUnicodeToBytes,
maintaining brevity.
Parameters
----------
value : any
passed to encodeUnicodeToBytes
errors: str
passed to encodeUnicodeToBytes
    condition: boolean or object with attribute __bool__()
if True, then we run encodeUnicodeToBytes. Usually PY2/PY3
"""
if condition:
return encodeUnicodeToBytes(value, errors)
return value | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/Utils/Utilities.py | 0.53777 | 0.283586 | Utilities.py | pypi |
import json
import urllib
from urllib.parse import urlparse, parse_qs, quote_plus
from collections import defaultdict
from Utils.CertTools import cert, ckey
from dbs.apis.dbsClient import aggFileLumis, aggFileParents
from WMCore.Services.pycurl_manager import getdata as multi_getdata
from Utils.PortForward import PortForward
def dbsListFileParents(dbsUrl, blocks):
"""
    Concurrent counterpart of the DBS listFileParents API
:param dbsUrl: DBS URL
:param blocks: list of blocks
:return: list of file parents
"""
urls = ['%s/fileparents?block_name=%s' % (dbsUrl, quote_plus(b)) for b in blocks]
func = aggFileParents
uKey = 'block_name'
return getUrls(urls, func, uKey)
def dbsListFileLumis(dbsUrl, blocks):
"""
    Concurrent counterpart of the DBS listFileLumis API
:param dbsUrl: DBS URL
:param blocks: list of blocks
:return: list of file lumis
"""
urls = ['%s/filelumis?block_name=%s' % (dbsUrl, quote_plus(b)) for b in blocks]
func = aggFileLumis
uKey = 'block_name'
return getUrls(urls, func, uKey)
def dbsBlockOrigin(dbsUrl, blocks):
"""
    Concurrent counterpart of the DBS blockorigin API
    :param dbsUrl: DBS URL
    :param blocks: list of blocks
    :return: list of block origins for the given blocks
"""
urls = ['%s/blockorigin?block_name=%s' % (dbsUrl, quote_plus(b)) for b in blocks]
func = None
uKey = 'block_name'
return getUrls(urls, func, uKey)
def dbsParentFilesGivenParentDataset(dbsUrl, parentDataset, fInfo):
"""
    Obtain parent files for the given file info objects
    :param dbsUrl: DBS URL
    :param parentDataset: parent dataset name
    :param fInfo: list of file info objects
    :return: dictionary mapping each LFN to the set of its parent files
"""
portForwarder = PortForward(8443)
urls = []
for fileInfo in fInfo:
run = fileInfo['run_num']
lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
url = f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}'
urls.append(portForwarder(url))
func = None
uKey = None
rdict = getUrls(urls, func, uKey)
parentFiles = defaultdict(set)
for fileInfo in fInfo:
run = fileInfo['run_num']
lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
url = f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}'
url = portForwarder(url)
if url in rdict:
pFileList = rdict[url]
pFiles = {x['logical_file_name'] for x in pFileList}
parentFiles[fileInfo['logical_file_name']] = \
parentFiles[fileInfo['logical_file_name']].union(pFiles)
return parentFiles
def getUrls(urls, aggFunc, uKey=None):
"""
Perform parallel DBS calls for given set of urls and apply given aggregation
function to the results.
:param urls: list of DBS urls to call
:param aggFunc: aggregation function
:param uKey: url parameter to use for final dictionary
    :return: dictionary of results where keys are urls and values are the obtained results
"""
data = multi_getdata(urls, ckey(), cert())
rdict = {}
for row in data:
url = row['url']
code = int(row.get('code', 200))
error = row.get('error')
if code != 200:
msg = f"Fail to query {url}. Error: {code} {error}"
raise RuntimeError(msg)
if uKey:
key = urlParams(url).get(uKey)
else:
key = url
data = row.get('data', [])
res = json.loads(data)
if aggFunc:
rdict[key] = aggFunc(res)
else:
rdict[key] = res
return rdict
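# Illustrative usage sketch (not part of the original module): fetching parent
# information for a block. It assumes a reachable DBS reader URL and valid grid
# certificates discoverable by Utils.CertTools; the block name below is made up.
def _exampleDbsListFileParentsUsage():
    dbsUrl = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader"
    blocks = ["/Primary/Processed-v1/AOD#11111111-2222-3333-4444-555555555555"]
    # keys of the returned dict are the block names, values the aggregated parentage
    return dbsListFileParents(dbsUrl, blocks)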
def urlParams(url):
"""
Return dictionary of URL parameters
:param url: URL link
:return: dictionary of URL parameters
"""
parsedUrl = urlparse(url)
rdict = parse_qs(parsedUrl.query)
for key, vals in rdict.items():
if len(vals) == 1:
rdict[key] = vals[0]
return rdict | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Services/DBS/DBSUtils.py | 0.572484 | 0.162746 | DBSUtils.py | pypi |
from __future__ import (division, print_function)
from builtins import str, bytes
from Utils.Utilities import encodeUnicodeToBytes
from io import BytesIO
import re
import xml.etree.cElementTree as ET
int_number_pattern = re.compile(r'(^[0-9-]$|^[0-9-][0-9]*$)')
float_number_pattern = re.compile(r'(^[-]?\d+\.\d*$|^\d*\.{1,1}\d+$)')
def adjust_value(value):
"""
Change null value to None.
"""
pat_float = float_number_pattern
pat_integer = int_number_pattern
if isinstance(value, str):
if value == 'null' or value == '(null)':
return None
elif pat_float.match(value):
return float(value)
elif pat_integer.match(value):
return int(value)
else:
return value
else:
return value
def xml_parser(data, prim_key):
"""
Generic XML parser
:param data: can be of type "file object", unicode string or bytes string
"""
if isinstance(data, (str, bytes)):
stream = BytesIO()
data = encodeUnicodeToBytes(data, "ignore")
stream.write(data)
stream.seek(0)
else:
stream = data
context = ET.iterparse(stream)
for event, elem in context:
row = {}
key = elem.tag
if key != prim_key:
continue
row[key] = elem.attrib
get_children(elem, event, row, key)
elem.clear()
yield row
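# Illustrative usage sketch (not part of the original module): parsing a small,
# made-up XML document and extracting every "architecture" element.
def _exampleXmlParserUsage():
    payload = '<root><architecture name="slc7_amd64_gcc900"/>' \
              '<architecture name="el8_amd64_gcc11"/></root>'
    return [row for row in xml_parser(payload, "architecture")]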
def get_children(elem, event, row, key):
"""
    xml_parser helper function. It recursively collects information about the
    children of a given element tag. The information is stored into the
    provided row under the given key.
"""
for child in elem.getchildren():
child_key = child.tag
child_data = child.attrib
if not child_data:
child_dict = adjust_value(child.text)
else:
child_dict = child_data
if child.getchildren(): # we got grand-children
if child_dict:
row[key][child_key] = child_dict
else:
row[key][child_key] = {}
if isinstance(child_dict, dict):
newdict = {child_key: child_dict}
else:
newdict = {child_key: {}}
get_children(child, event, newdict, child_key)
row[key][child_key] = newdict[child_key]
else:
if not isinstance(row[key], dict):
row[key] = {}
row[key].setdefault(child_key, [])
row[key][child_key].append(child_dict)
child.clear() | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Services/TagCollector/XMLUtils.py | 0.567697 | 0.201794 | XMLUtils.py | pypi |
from __future__ import division
from builtins import object
from datetime import timedelta, datetime
import socket
import json
import logging
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Timers import LocalTimezone
class AlertManagerAPI(object):
"""
A class used to send alerts via the MONIT AlertManager API
"""
def __init__(self, alertManagerUrl, logger=None):
self.alertManagerUrl = alertManagerUrl
# sender's hostname is added as an annotation
self.hostname = socket.gethostname()
self.mgr = RequestHandler()
self.ltz = LocalTimezone()
self.headers = {"Content-Type": "application/json"}
self.validSeverity = ["high", "medium", "low"]
self.logger = logger if logger else logging.getLogger()
def sendAlert(self, alertName, severity, summary, description, service, tag="wmcore", endSecs=600, generatorURL=""):
"""
:param alertName: a unique name for the alert
:param severity: low, medium, high
:param summary: a short description of the alert
:param description: a longer informational message with details about the alert
:param service: the name of the service firing an alert
:param tag: a unique tag used to help route the alert
        :param endSecs: how many seconds until the alert expires (used to set the endsAt timestamp)
:param generatorURL: this URL will be sent to AlertManager and configured as a clickable "Source" link in the web interface
AlertManager JSON format reference: https://www.prometheus.io/docs/alerting/latest/clients/
[
{
"labels": {
"alertname": "<requiredAlertName>",
"<labelname>": "<labelvalue>",
...
},
"annotations": {
"<labelname>": "<labelvalue>",
...
},
"startsAt": "<rfc3339>", # optional, will be current time if not present
"endsAt": "<rfc3339>",
"generatorURL": "<generator_url>" # optional
},
]
"""
if not self._isValidSeverity(severity):
return False
request = []
alert = {}
labels = {}
annotations = {}
# add labels
labels["alertname"] = alertName
labels["severity"] = severity
labels["tag"] = tag
labels["service"] = service
alert["labels"] = labels
# add annotations
annotations["hostname"] = self.hostname
annotations["summary"] = summary
annotations["description"] = description
alert["annotations"] = annotations
# In python3 we won't need the LocalTimezone class
# Will change to d = datetime.now().astimezone() + timedelta(seconds=endSecs)
d = datetime.now(self.ltz) + timedelta(seconds=endSecs)
alert["endsAt"] = d.isoformat("T")
alert["generatorURL"] = generatorURL
request.append(alert)
# need to do this because pycurl_manager only accepts dict and encoded strings type
params = json.dumps(request)
res = self.mgr.getdata(self.alertManagerUrl, params=params, headers=self.headers, verb='POST')
return res
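    # Illustrative usage sketch (not part of the original class), assuming a
    # reachable AlertManager endpoint; the URL and field values are made up:
    #   api = AlertManagerAPI("http://localhost:30093/api/v1/alerts")
    #   api.sendAlert("msUnmergedStuck", "medium", "Service stuck",
    #                 "No progress for 2 hours", service="ms-unmerged",
    #                 endSecs=2 * 60 * 60)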
def _isValidSeverity(self, severity):
"""
Used to check if the severity of the alert matches the valid levels: low, medium, high
:param severity: severity of the alert
:return: True or False
"""
if severity not in self.validSeverity:
logging.critical("Alert submitted to AlertManagerAPI with invalid severity: %s", severity)
return False
return True | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Services/AlertManager/AlertManagerAPI.py | 0.810554 | 0.161849 | AlertManagerAPI.py | pypi |
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class DBCreator(DBFormatter):
"""
_DBCreator_
Generic class for creating database tables.
"""
def __init__(self, logger, dbinterface):
"""
_init_
Call the constructor of the parent class and create empty dictionaries
to hold table create statements, constraint statements and insert
statements.
"""
DBFormatter.__init__(self, logger, dbinterface)
self.create = {}
self.constraints = {}
self.inserts = {}
self.indexes = {}
def execute(self, conn = None, transaction = False):
"""
_execute_
Generic method to create tables and constraints by execute
sql statements in the create, and constraints dictionaries.
Before execution the keys assigned to the tables in the self.create
dictionary are sorted, to offer the possibilitiy of executing
table creation in a certain order.
"""
# create tables
for i in sorted(self.create.keys()):
try:
self.dbi.processData(self.create[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.create[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
# create indexes
for i in self.indexes:
try:
self.dbi.processData(self.indexes[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.indexes[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
# set constraints
for i in self.constraints:
try:
self.dbi.processData(self.constraints[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.constraints[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
# insert permanent data
for i in self.inserts:
try:
self.dbi.processData(self.inserts[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.inserts[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
return True
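    # Illustrative usage sketch (not part of the original class): a schema module
    # would subclass DBCreator, fill the dictionaries and then call execute();
    # the table and SQL below are made up:
    #   class MySchema(DBCreator):
    #       def __init__(self, logger, dbi):
    #           DBCreator.__init__(self, logger, dbi)
    #           self.create["01my_table"] = "CREATE TABLE my_table (id INTEGER)"
    #   MySchema(logger, dbi).execute()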
def __str__(self):
"""
_str_
Return a well formatted text representation of the schema held in the
self.create, self.constraints, self.inserts, self.indexes dictionaries.
"""
string = ''
for i in self.create, self.constraints, self.inserts, self.indexes:
for j in i:
string = string + i[j].lstrip() + '\n'
return string | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Database/DBCreator.py | 0.526586 | 0.233969 | DBCreator.py | pypi |
from __future__ import division, print_function
from builtins import str, object
try:
import mongomock
except ImportError:
# this library should only be required by unit tests
mongomock = None
from pymongo import MongoClient, errors, IndexModel
from pymongo.errors import ConnectionFailure
class MongoDB(object):
"""
A simple wrapper class for creating a connection to a MongoDB instance
"""
def __init__(self, database=None, server=None,
create=False, collections=None, testIndexes=False,
logger=None, mockMongoDB=False, **kwargs):
"""
        :database: A database name to connect to
:server: The server url or a list of (server:port) pairs (see https://docs.mongodb.com/manual/reference/connection-string/)
:create: A flag to trigger a database creation (if missing) during
object construction, together with collections if present.
:collections: A list of tuples describing collections with indexes -
the first element is considered the collection name, all
the rest elements are considered as indexes
        :testIndexes: A flag to trigger an index test and to create the indexes
if missing (TODO)
:mockMongoDB: A flag to trigger a database simulation instead of trying
to connect to a real database server.
:logger: Logger
        Here follows a short list of useful optional parameters accepted by the
MongoClient which may be passed as keyword arguments to the current module:
:replicaSet: The name of the replica set to connect to. The driver will verify
that all servers it connects to match this name. Implies that the
hosts specified are a seed list and the driver should attempt to
find all members of the set. Defaults to None.
:port: The port number on which to connect. It is overwritten by the ports
defined in the Url string or from the tuples listed in the server list
:connect: If True, immediately begin connecting to MongoDB in the background.
Otherwise connect on the first operation.
:directConnection: If True, forces the client to connect directly to the specified MongoDB
host as a standalone. If False, the client connects to the entire
replica set of which the given MongoDB host(s) is a part.
If this is True and a mongodb+srv:// URI or a URI containing multiple
seeds is provided, an exception will be raised.
:username: A string
:password: A string
Although username and password must be percent-escaped in a MongoDB URI,
they must not be percent-escaped when passed as parameters. In this example,
both the space and slash special characters are passed as-is:
MongoClient(username="user name", password="pass/word")
"""
self.server = server
self.logger = logger
self.mockMongoDB = mockMongoDB
if mockMongoDB and mongomock is None:
msg = "You are trying to mock MongoDB, but you do not have mongomock in the python path."
self.logger.critical(msg)
raise ImportError(msg)
        # NOTE: We need to explicitly check for server availability.
# From pymongo Documentation: https://pymongo.readthedocs.io/en/stable/api/pymongo/mongo_client.html
# """
# ...
# Starting with version 3.0 the :class:`MongoClient`
# constructor no longer blocks while connecting to the server or
# servers, and it no longer raises
# :class:`~pymongo.errors.ConnectionFailure` if they are
# unavailable, nor :class:`~pymongo.errors.ConfigurationError`
# if the user's credentials are wrong. Instead, the constructor
# returns immediately and launches the connection process on
# background threads.
# ...
# """
try:
if mockMongoDB:
self.client = mongomock.MongoClient()
self.logger.info("NOTICE: MongoDB is set to use mongomock, instead of real database.")
else:
self.client = MongoClient(host=self.server, **kwargs)
self.client.server_info()
self.client.admin.command('ping')
except ConnectionFailure as ex:
msg = "Could not connect to MongoDB server: %s. Server not available. \n"
msg += "Giving up Now."
self.logger.error(msg, self.server)
raise ex from None
except Exception as ex:
msg = "Could not connect to MongoDB server: %s. Due to unknown reason: %s\n"
msg += "Giving up Now."
self.logger.error(msg, self.server, str(ex))
raise ex from None
self.create = create
self.testIndexes = testIndexes
self.dbName = database
self.collections = collections or []
self._dbConnect(database)
if self.create and self.collections:
for collection in self.collections:
self._collCreate(collection, database)
if self.testIndexes and self.collections:
for collection in self.collections:
self._indexTest(collection[0], collection[1])
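    # Illustrative usage sketch (not part of the original class): creating a
    # database with one indexed collection. Names are made up; IndexModel comes
    # from pymongo, as imported above.
    #   coll = ("msUnmergedColl", IndexModel([("name", 1)], unique=True))
    #   mongo = MongoDB(database="msUnmergedDB", server="mongodb://localhost:27017",
    #                   create=True, collections=[coll], logger=logger)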
def _indexTest(self, collection, index):
pass
def _collTest(self, coll, db):
# self[db].list_collection_names()
pass
def collCreate(self, coll):
"""
A public method for _collCreate
"""
self._collCreate(coll, self.database)
def _collCreate(self, coll, db):
"""
A function used to explicitly create a collection with the relevant
        indexes - used to avoid lazy creation by MongoDB and possible issues
        in case we end up with a collection without indexes, especially ones
        missing the `unique` index parameter.
:coll: A tuple describing one collection with indexes -
The first element is considered to be the collection name, and all
the rest of the elements are considered to be indexes.
The indexes must be of type IndexModel. See pymongo documentation:
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.create_index
:db: The database name for the collection
"""
collName = coll[0]
collIndexes = list(coll[1:])
try:
self.client[db].create_collection(collName)
except errors.CollectionInvalid:
# this error is thrown in case of an already existing collection
msg = "Collection '{}' Already exists in database '{}'".format(coll, db)
self.logger.warning(msg)
if collIndexes:
for index in collIndexes:
if not isinstance(index, IndexModel):
msg = "ERR: Bad Index type for collection %s" % collName
raise errors.InvalidName
try:
self.client[db][collName].create_indexes(collIndexes)
except Exception as ex:
msg = "Failed to create indexes on collection: %s\n%s" % (collName, str(ex))
self.logger.error(msg)
raise ex
def _dbTest(self, db):
"""
Tests database connection.
"""
# Test connection (from mongoDB documentation):
# https://api.mongodb.com/python/3.4.0/api/pymongo/mongo_client.html
try:
# The 'ismaster' command is cheap and does not require auth.
self.client.admin.command('ismaster')
except errors.ConnectionFailure as ex:
msg = "Server not available: %s" % str(ex)
self.logger.error(msg)
raise ex
# Test for database existence
if db not in self.client.list_database_names():
msg = "Missing MongoDB databases: %s" % db
self.logger.error(msg)
raise errors.InvalidName
def _dbCreate(self, db):
# creating an empty collection in order to create the database
_initColl = self.client[db].create_collection('_initCollection')
_initColl.insert_one({})
# NOTE: never delete the _initCollection if you want the database to persist
# self.client[db].drop_collection('_initCollection')
def dbConnect(self):
"""
A public method for _dbConnect
"""
self._dbConnect(self.database)
def _dbConnect(self, db):
"""
The function to be used for the initial database connection creation and testing
"""
try:
setattr(self, db, self.client[db])
if not self.mockMongoDB:
self._dbTest(db)
except errors.ConnectionFailure as ex:
msg = "Could not connect to MongoDB server for database: %s\n%s\n" % (db, str(ex))
msg += "Giving up Now."
self.logger.error(msg)
raise ex
except errors.InvalidName as ex:
msg = "Could not connect to a missing MongoDB databases: %s\n%s" % (db, str(ex))
self.logger.error(msg)
if self.create:
msg = "Trying to create: %s" % db
self.logger.error(msg)
try:
# self._dbCreate(getattr(self, db))
self._dbCreate(db)
except Exception as exc:
msg = "Could not create MongoDB databases: %s\n%s\n" % (db, str(exc))
msg += "Giving up Now."
self.logger.error(msg)
raise exc
try:
self._dbTest(db)
except Exception as exc:
msg = "Second failure while testing %s\n%s\n" % (db, str(exc))
msg += "Giving up Now."
self.logger.error(msg)
raise exc
msg = "Database %s successfully created" % db
self.logger.error(msg)
except Exception as ex:
msg = "General Exception while trying to connect to : %s\n%s" % (db, str(ex))
self.logger.error(msg)
raise ex | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Database/MongoDB.py | 0.660829 | 0.271949 | MongoDB.py | pypi |
import logging
import time
from WMCore.DataStructs.WMObject import WMObject
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class Transaction(WMObject):
dbi = None
def __init__(self, dbinterface = None):
"""
Get the connection from the DBInterface and open a new transaction on it
"""
self.dbi = dbinterface
self.conn = None
self.transaction = None
def begin(self):
        if self.conn is None:
self.conn = self.dbi.connection()
if self.conn.closed:
self.conn = self.dbi.connection()
        if self.transaction is None:
self.transaction = self.conn.begin()
return
def processData(self, sql, binds={}):
"""
Propagates the request to the proper dbcore backend,
and performs checks for lost (or closed) connection.
"""
result = self.dbi.processData(sql, binds, conn = self.conn,
transaction = True)
return result
def commit(self):
"""
Commit the transaction and return the connection to the pool
"""
        if self.transaction is not None:
self.transaction.commit()
        if self.conn is not None:
self.conn.close()
self.conn = None
self.transaction = None
def rollback(self):
"""
To be called if there is an exception and you want to roll back the
transaction and return the connection to the pool
"""
if self.transaction:
self.transaction.rollback()
if self.conn:
self.conn.close()
self.conn = None
self.transaction = None
return
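    # Illustrative usage sketch (not part of the original class); the SQL and
    # binds are made up:
    #   trans = Transaction(dbinterface)
    #   trans.begin()
    #   try:
    #       trans.processData("INSERT INTO t (id) VALUES (:id)", {"id": 1})
    #       trans.commit()
    #   except Exception:
    #       trans.rollback()
    #       raise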
def rollbackForError(self):
"""
This is called when handling a major exception. This is because sometimes
you can end up in a situation where the transaction appears open, but is not. In
this case, calling a rollback on the transaction will cause an exception, which
then destroys all logging and shutdown of the actual code.
Use only in components.
"""
try:
self.rollback()
except:
pass
return | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Database/Transaction.py | 0.487063 | 0.150809 | Transaction.py | pypi |
from copy import copy
from Utils.IteratorTools import grouper
import WMCore.WMLogging
from WMCore.DataStructs.WMObject import WMObject
from WMCore.Database.ResultSet import ResultSet
class DBInterface(WMObject):
"""
Base class for doing SQL operations using a SQLAlchemy engine, or
pre-exisitng connection.
processData will take a (list of) sql statements and a (list of)
bind variable dictionaries and run the statements on the DB. If
necessary it will substitute binds into the sql (MySQL).
TODO:
Add in some suitable exceptions in one or two places
Test the hell out of it
Support executemany()
"""
logger = None
engine = None
def __init__(self, logger, engine):
self.logger = logger
self.logger.info ("Instantiating base WM DBInterface")
self.engine = engine
self.maxBindsPerQuery = 500
def buildbinds(self, sequence, thename, therest=[{}]):
"""
Build a list of binds. Can be used recursively, e.g.:
buildbinds(file, 'file', buildbinds(pnn, 'location'), {'lumi':123})
TODO: replace with an appropriate map function
"""
binds = []
for r in sequence:
for i in self.makelist(therest):
thebind = copy(i)
thebind[thename] = r
binds.append(thebind)
return binds
def executebinds(self, s=None, b=None, connection=None,
returnCursor=False):
"""
_executebinds_
returns a list of sqlalchemy.engine.base.ResultProxy objects
"""
        if b is None:
resultProxy = connection.execute(s)
else:
resultProxy = connection.execute(s, b)
if returnCursor:
return resultProxy
result = ResultSet()
result.add(resultProxy)
resultProxy.close()
return result
def executemanybinds(self, s=None, b=None, connection=None,
returnCursor=False):
"""
_executemanybinds_
b is a list of dictionaries for the binds, e.g.:
b = [ {'bind1':'value1a', 'bind2': 'value2a'},
{'bind1':'value1b', 'bind2': 'value2b'} ]
see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/
Can't executemany() selects - so do each combination of binds here instead.
This will return a list of sqlalchemy.engine.base.ResultProxy object's
one for each set of binds.
returns a list of sqlalchemy.engine.base.ResultProxy objects
"""
s = s.strip()
if s.lower().endswith('select', 0, 6):
"""
Trying to select many
"""
if returnCursor:
result = []
for bind in b:
result.append(connection.execute(s, bind))
else:
result = ResultSet()
for bind in b:
resultproxy = connection.execute(s, bind)
result.add(resultproxy)
resultproxy.close()
return self.makelist(result)
"""
Now inserting or updating many
"""
result = connection.execute(s, b)
return self.makelist(result)
def connection(self):
"""
Return a connection to the engine (from the connection pool)
"""
return self.engine.connect()
def processData(self, sqlstmt, binds={}, conn=None,
transaction=False, returnCursor=False):
"""
set conn if you already have an active connection to reuse
set transaction = True if you already have an active transaction
"""
connection = None
try:
if not conn:
connection = self.connection()
else:
connection = conn
result = []
# Can take either a single statement or a list of statements and binds
sqlstmt = self.makelist(sqlstmt)
binds = self.makelist(binds)
if len(sqlstmt) > 0 and (len(binds) == 0 or (binds[0] == {} or binds[0] == None)):
# Should only be run by create statements
if not transaction:
#WMCore.WMLogging.sqldebug("transaction created in DBInterface")
trans = connection.begin()
for i in sqlstmt:
r = self.executebinds(i, connection=connection,
returnCursor=returnCursor)
result.append(r)
if not transaction:
trans.commit()
elif len(binds) > len(sqlstmt) and len(sqlstmt) == 1:
#Run single SQL statement for a list of binds - use execute_many()
if not transaction:
trans = connection.begin()
for subBinds in grouper(binds, self.maxBindsPerQuery):
result.extend(self.executemanybinds(sqlstmt[0], subBinds,
connection=connection, returnCursor=returnCursor))
if not transaction:
trans.commit()
elif len(binds) == len(sqlstmt):
# Run a list of SQL for a list of binds
if not transaction:
trans = connection.begin()
for i, s in enumerate(sqlstmt):
b = binds[i]
r = self.executebinds(s, b, connection=connection,
returnCursor=returnCursor)
result.append(r)
if not transaction:
trans.commit()
else:
self.logger.exception(
"DBInterface.processData Nothing executed, problem with your arguments")
self.logger.exception(
"DBInterface.processData SQL = %s" % sqlstmt)
WMCore.WMLogging.sqldebug('DBInterface.processData sql is %s items long' % len(sqlstmt))
WMCore.WMLogging.sqldebug('DBInterface.processData binds are %s items long' % len(binds))
assert_value = False
if len(binds) == len(sqlstmt):
assert_value = True
WMCore.WMLogging.sqldebug('DBInterface.processData are binds and sql same length? : %s' % (assert_value))
WMCore.WMLogging.sqldebug('sql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
(sqlstmt, binds, connection, transaction))
WMCore.WMLogging.sqldebug('type check:\nsql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
(type(sqlstmt), type(binds), type(connection), type(transaction)))
raise Exception("""DBInterface.processData Nothing executed, problem with your arguments
Probably mismatched sizes for sql (%i) and binds (%i)""" % (len(sqlstmt), len(binds)))
finally:
if not conn and connection != None:
connection.close() # Return connection to the pool
return result | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Database/DBCore.py | 0.414069 | 0.245401 | DBCore.py | pypi |
import copy
from WMCore.Database.DBCore import DBInterface
from WMCore.Database.ResultSet import ResultSet
def bindVarCompare(a):
"""
_bindVarCompare_
Bind variables are represented as a tuple with the first element being the
variable name and the second being it's position in the query. We sort on
the position in the query.
"""
return a[1]
def stringLengthCompare(a):
"""
_stringLengthCompare_
Sort comparison function to sort strings by length.
Since we want to sort from longest to shortest, this must be reversed when used
"""
return len(a)
class MySQLInterface(DBInterface):
def substitute(self, origSQL, origBindsList):
"""
_substitute_
Transform as set of bind variables from a list of dictionaries to a list
of tuples:
b = [ {'bind1':'value1a', 'bind2': 'value2a'},
{'bind1':'value1b', 'bind2': 'value2b'} ]
Will be transformed into:
b = [ ('value1a', 'value2a'), ('value1b', 'value2b')]
Don't need to substitute in the binds as executemany does that
internally. But the sql will also need to be reformatted, such that
:bind_name becomes %s.
See: http://www.devshed.com/c/a/Python/MySQL-Connectivity-With-Python/5/
"""
if origBindsList == None:
return origSQL, None
origBindsList = self.makelist(origBindsList)
origBind = origBindsList[0]
bindVarPositionList = []
updatedSQL = copy.copy(origSQL)
# We process bind variables from longest to shortest to avoid a shorter
# bind variable matching a longer one. For example if we have two bind
# variables: RELEASE_VERSION and RELEASE_VERSION_ID the former will
# match against the latter, causing problems. We'll sort the variable
# names by length to guard against this.
bindVarNames = list(origBind)
bindVarNames.sort(key=stringLengthCompare, reverse=True)
bindPositions = {}
for bindName in bindVarNames:
searchPosition = 0
while True:
bindPosition = origSQL.lower().find(":%s" % bindName.lower(),
searchPosition)
if bindPosition == -1:
break
if bindPosition not in bindPositions:
bindPositions[bindPosition] = 0
bindVarPositionList.append((bindName, bindPosition))
searchPosition = bindPosition + 1
searchPosition = 0
while True:
bindPosition = updatedSQL.lower().find(":%s" % bindName.lower(),
searchPosition)
if bindPosition == -1:
break
left = updatedSQL[0:bindPosition]
right = updatedSQL[bindPosition + len(bindName) + 1:]
updatedSQL = left + "%s" + right
bindVarPositionList.sort(key=bindVarCompare)
mySQLBindVarsList = []
for origBind in origBindsList:
mySQLBindVars = []
for bindVarPosition in bindVarPositionList:
mySQLBindVars.append(origBind[bindVarPosition[0]])
mySQLBindVarsList.append(tuple(mySQLBindVars))
return (updatedSQL, mySQLBindVarsList)
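    # Illustrative sketch (not part of the original class) of what substitute()
    # is expected to produce for a made-up statement:
    #   substitute("SELECT * FROM t WHERE id = :id AND name = :name",
    #              [{"id": 1, "name": "a"}])
    #   -> ("SELECT * FROM t WHERE id = %s AND name = %s", [(1, "a")])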
def executebinds(self, s = None, b = None, connection = None,
returnCursor = False):
"""
_executebinds_
Execute a SQL statement that has a single set of bind variables.
Transform the bind variables into the format that MySQL expects.
"""
s, b = self.substitute(s, b)
return DBInterface.executebinds(self, s, b, connection, returnCursor)
def executemanybinds(self, s = None, b = None, connection = None,
returnCursor = False):
"""
_executemanybinds_
Execute a SQL statement that has multiple sets of bind variables.
Transform the bind variables into the format that MySQL expects.
"""
newsql, binds = self.substitute(s, b)
return DBInterface.executemanybinds(self, newsql, binds, connection,
returnCursor) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Database/MySQLCore.py | 0.637031 | 0.431105 | MySQLCore.py | pypi |
from __future__ import print_function
from builtins import str, bytes, int
from future.utils import viewitems
from Utils.PythonVersion import PY2
import sys
import types
class _EmptyClass(object):
pass
class JSONThunker(object):
"""
_JSONThunker_
Converts an arbitrary object to <-> from a jsonable object.
Will, for the most part "do the right thing" about various instance objects
by storing their class information along with their data in a dict. Handles
a recursion limit to prevent infinite recursion.
self.passThroughTypes - stores a list of types that should be passed
through unchanged to the JSON parser
self.blackListedModules - a list of modules that should not be stored in
the JSON.
"""
def __init__(self):
self.passThroughTypes = (type(None),
bool,
int,
float,
complex,
str,
bytes,
)
# objects that inherit from dict should be treated as a dict
# they don't store their data in __dict__. There was enough
        # of those classes that it warranted making a special case
self.dictSortOfObjects = (('WMCore.Datastructs.Job', 'Job'),
('WMCore.WMBS.Job', 'Job'),
('WMCore.Database.CMSCouch', 'Document'))
# ditto above, but for lists
self.listSortOfObjects = (('WMCore.DataStructs.JobPackage', 'JobPackage'),
('WMCore.WMBS.JobPackage', 'JobPackage'),)
self.foundIDs = {}
# modules we don't want JSONed
self.blackListedModules = ('sqlalchemy.engine.threadlocal',
'WMCore.Database.DBCore',
'logging',
'WMCore.DAOFactory',
'WMCore.WMFactory',
'WMFactory',
'WMCore.Configuration',
'WMCore.Database.Transaction',
'threading',
'datetime')
def checkRecursion(self, data):
"""
handles checking for infinite recursion
"""
if id(data) in self.foundIDs:
if self.foundIDs[id(data)] > 5:
self.unrecurse(data)
return "**RECURSION**"
else:
self.foundIDs[id(data)] += 1
return data
else:
self.foundIDs[id(data)] = 1
return data
def unrecurse(self, data):
"""
backs off the recursion counter if we're returning from _thunk
"""
try:
self.foundIDs[id(data)] -= 1
except:
print("Could not find count for id %s of type %s data %s" % (id(data), type(data), data))
raise
def checkBlackListed(self, data):
"""
checks to see if a given object is from a blacklisted module
"""
try:
# special case
if data.__class__.__module__ == 'WMCore.Database.CMSCouch' and data.__class__.__name__ == 'Document':
data.__class__ = type({})
return data
if data.__class__.__module__ in self.blackListedModules:
return "Blacklisted JSON object: module %s, name %s, str() %s" % \
(data.__class__.__module__, data.__class__.__name__, str(data))
else:
return data
except Exception:
return data
def thunk(self, toThunk):
"""
Thunk - turns an arbitrary object into a JSONable object
"""
self.foundIDs = {}
data = self._thunk(toThunk)
return data
def unthunk(self, data):
"""
unthunk - turns a previously 'thunked' object back into a python object
"""
return self._unthunk(data)
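    # Illustrative round-trip sketch (not part of the original class); the object
    # being thunked is a made-up instance and json is the standard library module:
    #   thunker = JSONThunker()
    #   jsonable = thunker.thunk(someObject)         # plain dicts/lists/strings
    #   text = json.dumps(jsonable)
    #   restored = thunker.unthunk(json.loads(text))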
def handleSetThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
tempDict = {'thunker_encoded_json': True, 'type': 'set'}
tempDict['set'] = self._thunk(list(toThunk))
self.unrecurse(toThunk)
return tempDict
def handleListThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
for k, v in enumerate(toThunk):
toThunk[k] = self._thunk(v)
self.unrecurse(toThunk)
return toThunk
def handleDictThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
special = False
tmpdict = {}
for k, v in viewitems(toThunk):
            if type(k) is int:
special = True
tmpdict['_i:%s' % k] = self._thunk(v)
            elif type(k) is float:
special = True
tmpdict['_f:%s' % k] = self._thunk(v)
else:
tmpdict[k] = self._thunk(v)
if special:
toThunk['thunker_encoded_json'] = self._thunk(True)
toThunk['type'] = self._thunk('dict')
toThunk['dict'] = tmpdict
else:
toThunk.update(tmpdict)
self.unrecurse(toThunk)
return toThunk
def handleObjectThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
toThunk = self.checkBlackListed(toThunk)
if isinstance(toThunk, (str, bytes)):
# things that got blacklisted
return toThunk
if hasattr(toThunk, '__to_json__'):
# Use classes own json thunker
toThunk2 = toThunk.__to_json__(self)
self.unrecurse(toThunk)
return toThunk2
elif isinstance(toThunk, dict):
toThunk2 = self.handleDictObjectThunk(toThunk)
self.unrecurse(toThunk)
return toThunk2
elif isinstance(toThunk, list):
# a mother thunking list
toThunk2 = self.handleListObjectThunk(toThunk)
self.unrecurse(toThunk)
return toThunk2
else:
try:
thunktype = '%s.%s' % (toThunk.__class__.__module__,
toThunk.__class__.__name__)
tempDict = {'thunker_encoded_json': True, 'type': thunktype}
tempDict[thunktype] = self._thunk(toThunk.__dict__)
self.unrecurse(toThunk)
return tempDict
except Exception as e:
tempDict = {'json_thunk_exception_': "%s" % e}
self.unrecurse(toThunk)
return tempDict
def handleDictObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_dict': True,
'type': thunktype,
thunktype: {}}
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
for k, v in viewitems(data):
tempDict[thunktype][k] = self._thunk(v)
return tempDict
def handleDictObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_dict', False)
thunktype = data.pop('type', False)
for k, v in viewitems(data):
if k == thunktype:
for k2, v2 in viewitems(data[thunktype]):
value[k2] = self._unthunk(v2)
else:
value.__dict__[k] = self._unthunk(v)
return value
def handleListObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_list': True,
'type': thunktype,
thunktype: []}
for k, v in enumerate(data):
            tempDict[thunktype].append(self._thunk(v))
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
return tempDict
def handleListObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_list', False)
thunktype = data.pop('type')
        for v in data[thunktype]:
            value.append(self._unthunk(v))
        for k, v in viewitems(data):
            if k == thunktype:
                continue
            value.__dict__[k] = self._unthunk(v)
return value
def _thunk(self, toThunk):
"""
helper function for thunk, does the actual work
"""
if isinstance(toThunk, self.passThroughTypes):
return toThunk
elif type(toThunk) is list:
return self.handleListThunk(toThunk)
elif type(toThunk) is dict:
return self.handleDictThunk(toThunk)
elif type(toThunk) is set:
return self.handleSetThunk(toThunk)
elif type(toThunk) is types.FunctionType:
self.unrecurse(toThunk)
return "function reference"
elif isinstance(toThunk, object):
return self.handleObjectThunk(toThunk)
else:
self.unrecurse(toThunk)
raise RuntimeError(type(toThunk))
def _unthunk(self, jsondata):
"""
_unthunk - does the actual work for unthunk
"""
if PY2 and type(jsondata) is str:
return jsondata.encode("utf-8")
if type(jsondata) is dict:
if 'thunker_encoded_json' in jsondata:
# we've got a live one...
if jsondata['type'] == 'set':
newSet = set()
for i in self._unthunk(jsondata['set']):
newSet.add(self._unthunk(i))
return newSet
if jsondata['type'] == 'dict':
# We have a "special" dict
data = {}
for k, v in viewitems(jsondata['dict']):
tmp = self._unthunk(v)
if k.startswith('_i:'):
data[int(k.lstrip('_i:'))] = tmp
elif k.startswith('_f:'):
data[float(k.lstrip('_f:'))] = tmp
else:
data[k] = tmp
return data
else:
# spawn up an instance.. good luck
# here be monsters
# inspired from python's pickle code
ourClass = self.getThunkedClass(jsondata)
value = _EmptyClass()
if hasattr(ourClass, '__from_json__'):
# Use classes own json loader
try:
value.__class__ = ourClass
except Exception:
value = ourClass()
value = ourClass.__from_json__(value, jsondata, self)
elif 'thunker_encoded_json' in jsondata and 'is_dict' in jsondata:
try:
value.__class__ = ourClass
except Exception:
value = ourClass()
value = self.handleDictObjectUnThunk(value, jsondata)
elif 'thunker_encoded_json' in jsondata:
try:
value.__class__ = ourClass
except Exception:
value = ourClass()
value = self.handleListObjectUnThunk(value, jsondata)
else:
raise RuntimeError('Could not unthunk a class. Code to try was removed because it had errors.')
return value
else:
data = {}
for k, v in viewitems(jsondata):
data[k] = self._unthunk(v)
return data
else:
return jsondata
@staticmethod
def getThunkedClass(jsondata):
"""
Work out the class from it's thunked json representation
"""
module = jsondata['type'].rsplit('.', 1)[0]
name = jsondata['type'].rsplit('.', 1)[1]
        if (module == 'WMCore.Services.Requests') and (name == 'JSONThunker'):
raise RuntimeError("Attempted to unthunk a JSONThunker..")
__import__(module)
mod = sys.modules[module]
ourClass = getattr(mod, name)
return ourClass | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Wrappers/JsonWrapper/JSONThunker.py | 0.443118 | 0.360208 | JSONThunker.py | pypi |
from builtins import next, str, object
from future.utils import viewitems
import xml.parsers.expat
class Node(object):
"""
_Node_
Really simple DOM like container to simplify parsing the XML file
and formatting the character data without all the whitespace guff
"""
def __init__(self, name, attrs):
self.name = str(name)
self.attrs = {}
self.text = None
for k, v in viewitems(attrs):
self.attrs.__setitem__(str(k), str(v))
self.children = []
def __str__(self):
result = " %s %s \"%s\"\n" % (self.name, self.attrs, self.text)
for child in self.children:
result += str(child)
return result
def coroutine(func):
"""
_coroutine_
Decorator method used to prime coroutines
"""
def start(*args,**kwargs):
cr = func(*args,**kwargs)
next(cr)
return cr
return start
def xmlFileToNode(reportFile):
"""
_xmlFileToNode_
Use expat and the build coroutine to parse the XML file and build
a node structure
"""
node = Node("JobReports", {})
    with open(reportFile, 'rb') as fileObj:
        expat_parse(fileObj, build(node))
return node
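# Illustrative usage sketch (not part of the original module): parse a framework
# job report file into a Node tree. The file name below is made up.
def _exampleXmlFileToNodeUsage(reportFile="FrameworkJobReport.xml"):
    topNode = xmlFileToNode(reportFile)
    return str(topNode)  # indented dump of names, attributes and text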
def expat_parse(f, target):
"""
_expat_parse_
Expat based XML parsing that feeds a node building coroutine
"""
parser = xml.parsers.expat.ParserCreate()
#parser.buffer_size = 65536
parser.buffer_text = True
# a leftover from the py2py3 migration - TO BE REMOVED
# parser.returns_unicode = False
parser.StartElementHandler = \
lambda name,attrs: target.send(('start',(name,attrs)))
parser.EndElementHandler = \
lambda name: target.send(('end',name))
parser.CharacterDataHandler = \
lambda data: target.send(('text',data))
parser.ParseFile(f)
@coroutine
def build(topNode):
"""
_build_
Node structure builder that is fed from the expat_parse method
"""
nodeStack = [topNode]
charCache = []
while True:
event, value = (yield)
if event == "start":
charCache = []
newnode = Node(value[0], value[1])
nodeStack[-1].children.append(newnode)
nodeStack.append(newnode)
elif event == "text":
charCache.append(value)
else: # end
nodeStack[-1].text = str(''.join(charCache)).strip()
nodeStack.pop()
charCache = [] | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Algorithms/ParseXMLFile.py | 0.592431 | 0.276608 | ParseXMLFile.py | pypi |
from __future__ import print_function, division
from builtins import str, range
import math
import decimal
import logging
from WMCore.WMException import WMException
class MathAlgoException(WMException):
"""
Some simple math algo exceptions
"""
pass
def getAverageStdDev(numList):
"""
_getAverageStdDev_
Given a list, calculate both the average and the
standard deviation.
"""
    if len(numList) == 0:
# Nothing to do here
return 0.0, 0.0
total = 0.0
average = 0.0
stdBase = 0.0
# Assemble the average
skipped = 0
for value in numList:
try:
if math.isnan(value) or math.isinf(value):
skipped += 1
continue
else:
total += value
except TypeError:
msg = "Attempted to take average of non-numerical values.\n"
msg += "Expected int or float, got %s: %s" % (value.__class__, value)
logging.error(msg)
logging.debug("FullList: %s", numList)
raise MathAlgoException(msg)
length = len(numList) - skipped
if length < 1:
return average, total
average = total / length
for value in numList:
tmpValue = value - average
stdBase += (tmpValue * tmpValue)
stdDev = math.sqrt(stdBase / length)
if math.isnan(average) or math.isinf(average):
average = 0.0
    if math.isnan(stdDev) or math.isinf(stdDev) or not decimal.Decimal(str(stdDev)).is_finite():
stdDev = 0.0
if not isinstance(stdDev, (int, float)):
stdDev = 0.0
return average, stdDev
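# Illustrative usage sketch (not part of the original module), with made-up numbers:
def _exampleGetAverageStdDevUsage():
    avg, stdDev = getAverageStdDev([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
    # avg == 5.0 and stdDev == 2.0 (population standard deviation) for this list
    return avg, stdDev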
def createHistogram(numList, nBins, limit):
"""
_createHistogram_
Create a histogram proxy (a list of bins) for a
given list of numbers
"""
average, stdDev = getAverageStdDev(numList = numList)
underflow = []
overflow = []
histEvents = []
histogram = []
for value in numList:
if math.fabs(average - value) <= limit * stdDev:
# Then we counted this event
histEvents.append(value)
elif average < value:
overflow.append(value)
elif average > value:
underflow.append(value)
if len(underflow) > 0:
binAvg, binStdDev = getAverageStdDev(numList=underflow)
histogram.append({'type': 'underflow',
'average': binAvg,
'stdDev': binStdDev,
'nEvents': len(underflow)})
if len(overflow) > 0:
binAvg, binStdDev = getAverageStdDev(numList=overflow)
histogram.append({'type': 'overflow',
'average': binAvg,
'stdDev': binStdDev,
'nEvents': len(overflow)})
if len(histEvents) < 1:
# Nothing to do?
return histogram
histEvents.sort()
upperBound = max(histEvents)
lowerBound = min(histEvents)
if lowerBound == upperBound:
# This is a problem
logging.debug("Only one value in the histogram!")
nBins = 1
upperBound = upperBound + 1
lowerBound = lowerBound - 1
binSize = (upperBound - lowerBound)/nBins
binSize = floorTruncate(binSize)
for x in range(nBins):
lowerEdge = floorTruncate(lowerBound + (x * binSize))
histogram.append({'type': 'standard',
'lowerEdge': lowerEdge,
'upperEdge': lowerEdge + binSize,
'average': 0.0,
'stdDev': 0.0,
'nEvents': 0})
for bin_ in histogram:
if bin_['type'] != 'standard':
continue
binList = []
for value in histEvents:
if value >= bin_['lowerEdge'] and value <= bin_['upperEdge']:
# Then we're in the bin
binList.append(value)
elif value > bin_['upperEdge']:
# Because this is a sorted list we are now out of the bin range
# Calculate our values and break
break
else:
continue
# If we get here, it's because we're out of values in the bin
# Time to do some math
if len(binList) < 1:
# Nothing to do here, leave defaults
continue
binAvg, binStdDev = getAverageStdDev(numList=binList)
bin_['average'] = binAvg
bin_['stdDev'] = binStdDev
bin_['nEvents'] = len(binList)
return histogram
def floorTruncate(value, precision=3):
"""
_floorTruncate_
Truncate a value to a set number of decimal points
Always truncates to a LOWER value, this is so that using it for
histogram binning creates values beneath the histogram lower edge.
"""
prec = math.pow(10, precision)
return math.floor(value * prec)/prec
def sortDictionaryListByKey(dictList, key, reverse=False):
"""
_sortDictionaryListByKey_
Given a list of dictionaries and a key with a numerical
value, sort that dictionary in order of that key's value.
    NOTE: If the key does not exist, this will not raise an exception.
    This is because it is used for sorting performance histograms,
    and not all histograms have the same keys.
"""
return sorted(dictList, key=lambda k: float(k.get(key, 0.0)), reverse=reverse)
def getLargestValues(dictList, key, n=1):
"""
_getLargestValues_
Take a list of dictionaries, sort them by the value of a
particular key, and return the n largest entries.
Key must be a numerical key.
"""
sortedList = sortDictionaryListByKey(dictList=dictList, key=key, reverse=True)
return sortedList[:n]
def validateNumericInput(value):
"""
_validateNumericInput_
Check that the value is actually an usable number
"""
value = float(value)
try:
if math.isnan(value) or math.isinf(value):
return False
except TypeError:
return False
return True
def calculateRunningAverageAndQValue(newPoint, n, oldM, oldQ):
"""
_calculateRunningAverageAndQValue_
Use the algorithm described in:
Donald E. Knuth (1998). The Art of Computer Programming, volume 2: Seminumerical Algorithms, 3rd ed.., p. 232. Boston: Addison-Wesley.
To calculate an average and standard deviation while getting data, the standard deviation
can be obtained from the so-called Q value with the following equation:
sigma = sqrt(Q/n)
This is also contained in the function calculateStdDevFromQ in this module. The average is equal to M.
"""
if not validateNumericInput(newPoint): raise MathAlgoException("Provided a non-valid newPoint")
if not validateNumericInput(n): raise MathAlgoException("Provided a non-valid n")
if n == 1:
M = newPoint
Q = 0.0
else:
if not validateNumericInput(oldM): raise MathAlgoException("Provided a non-valid oldM")
if not validateNumericInput(oldQ): raise MathAlgoException("Provided a non-valid oldQ")
M = oldM + (newPoint - oldM) / n
Q = oldQ + ((n - 1) * (newPoint - oldM) * (newPoint - oldM) / n)
return M, Q
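# Illustrative usage sketch (not part of the original module): feeding points one
# by one and recovering the standard deviation at the end. Numbers are made up.
def _exampleRunningAverageUsage(points=(2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0)):
    M, Q = 0.0, 0.0
    for n, point in enumerate(points, start=1):
        M, Q = calculateRunningAverageAndQValue(point, n, M, Q)
    return M, calculateStdDevFromQ(Q, len(points))  # (5.0, 2.0) for these points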
def calculateStdDevFromQ(Q, n):
"""
_calculateStdDevFromQ_
If Q is the sum of the squared differences of some points to their average,
then the standard deviation is given by:
sigma = sqrt(Q/n)
This function calculates that formula
"""
if not validateNumericInput(Q): raise MathAlgoException("Provided a non-valid Q")
if not validateNumericInput(n): raise MathAlgoException("Provided a non-valid n")
sigma = math.sqrt(Q / n)
if not validateNumericInput(sigma): return 0.0
return sigma | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/Algorithms/MathAlgos.py | 0.591841 | 0.33565 | MathAlgos.py | pypi |
from builtins import str as newstr
import random, cherrypy
class RESTError(Exception):
"""Base class for REST errors.
.. attribute:: http_code
Integer, HTTP status code for this error. Also emitted as X-Error-HTTP
header value.
.. attribute:: app_code
Integer, application error code, to be emitted as X-REST-Status header.
.. attribute:: message
String, information about the error, to be emitted as X-Error-Detail
header. Should not contain anything sensitive, and in particular should
never include any unvalidated or unsafe data, e.g. input parameters or
data from a database. Normally a fixed label with one-to-one match with
the :obj:`app-code`. If the text exceeds 200 characters, it's truncated.
Since this is emitted as a HTTP header, it cannot contain newlines or
anything encoding-dependent.
.. attribute:: info
String, additional information beyond :obj:`message`, to be emitted as
X-Error-Info header. Like :obj:`message` should not contain anything
sensitive or unsafe, or text inappropriate for a HTTP response header,
and should be short enough to fit in 200 characters. This is normally
free form text to clarify why the error happened.
.. attribute:: errid
String, random unique identifier for this error, to be emitted as
X-Error-ID header and output into server logs when logging the error.
The purpose is that clients save this id when they receive an error,
and further error reporting or debugging can use this value to identify
the specific error, and for example to grep logs for more information.
.. attribute:: errobj
If the problem was caused by another exception being raised in the code,
reference to the original exception object. For example if the code dies
with an :class:`KeyError`, this is the original exception object. This
error is logged to the server logs when reporting the error, but no
information about it is returned to the HTTP client.
.. attribute:: trace
The origin of the exception as returned by :func:`format_exc`. The full
trace is emitted to the server logs, each line prefixed with timestamp.
This information is not returned to the HTTP client.
"""
http_code = None
app_code = None
message = None
info = None
errid = None
errobj = None
trace = None
def __init__(self, info = None, errobj = None, trace = None):
self.errid = "%032x" % random.randrange(1 << 128)
self.errobj = errobj
self.info = info
self.trace = trace
def __str__(self):
return "%s %s [HTTP %d, APP %d, MSG %s, INFO %s, ERR %s]" \
% (self.__class__.__name__, self.errid, self.http_code, self.app_code,
repr(self.message).replace("\n", " ~~ "),
repr(self.info).replace("\n", " ~~ "),
repr(self.errobj).replace("\n", " ~~ "))
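# Illustrative sketch (not part of the original module): application code defines
# its own errors by subclassing RESTError with the three class attributes; the
# codes and message below are made up, not real values from this package.
class _ExampleQuotaExceeded(RESTError):
    "Hypothetical error raised when a client exceeds its request quota."
    http_code = 429
    app_code = 999
    message = "Request quota exceeded"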
class NotAcceptable(RESTError):
"Client did not specify format it accepts, or no compatible format was found."
http_code = 406
app_code = 201
message = "Not acceptable"
class UnsupportedMethod(RESTError):
"Client used HTTP request method which isn't supported for any API call."
http_code = 405
app_code = 202
message = "Request method not supported"
class MethodWithoutQueryString(RESTError):
"Client provided a query string which isn't acceptable for this request method."
http_code = 405
app_code = 203
message = "Query arguments not supported for this request method"
class APIMethodMismatch(RESTError):
"""Both the API and HTTP request methods are supported, but not in that
combination."""
http_code = 405
app_code = 204
message = "API not supported for this request method"
class APINotSpecified(RESTError):
"The request URL is missing API argument."
http_code = 400
app_code = 205
message = "API not specified"
class NoSuchInstance(RESTError):
"""The request URL is missing instance argument or the specified instance
does not exist."""
http_code = 404
app_code = 206
message = "No such instance"
class APINotSupported(RESTError):
"The request URL provides wrong API argument."
http_code = 404
app_code = 207
message = "API not supported"
class DataCacheEmpty(RESTError):
"The wmstats data cache has not be created."
http_code = 503
app_code = 208
message = "DataCache is Empty"
class DatabaseError(RESTError):
"""Parent class for database-related errors.
    .. attribute:: lastsql
A tuple of *(sql, binds, kwbinds),* where `sql` is the last SQL statement
executed and `binds`, `kwbinds` are the bind values used with it. Any
sensitive parts like passwords have already been censored from the `sql`
string. Note that for massive requests `binds` or `kwbinds` can get large.
These are logged out in the server logs when reporting the error, but no
information about these are returned to the HTTP client.
    .. attribute:: instance
String, the database instance for which the error occurred. This is
reported in the error message output to server logs, but no information
about this is returned to the HTTP client."""
lastsql = None
instance = None
def __init__(self, info = None, errobj = None, trace = None,
lastsql = None, instance = None):
RESTError.__init__(self, info, errobj, trace)
self.lastsql = lastsql
self.instance = instance
class DatabaseUnavailable(DatabaseError):
"""The instance argument is correct, but cannot connect to the database.
This error will only occur at initial attempt to connect to the database,
:class:`~.DatabaseConnectionError` is raised instead if the connection
ends prematurely after the transaction has already begun successfully."""
http_code = 503
app_code = 401
message = "Database unavailable"
class DatabaseConnectionError(DatabaseError):
"""Database was available when the operation started, but the connection
was lost or otherwise failed during the application operation."""
http_code = 504
app_code = 402
message = "Database connection failure"
class DatabaseExecutionError(DatabaseError):
"""Database operation failed."""
http_code = 500
app_code = 403
message = "Execution error"
class MissingParameter(RESTError):
"Client did not supply a parameter which is required."
http_code = 400
app_code = 301
message = "Missing required parameter"
class InvalidParameter(RESTError):
"Client supplied invalid value for a parameter."
http_code = 400
app_code = 302
message = "Invalid input parameter"
class MissingObject(RESTError):
"""An object required for the operation is missing. This might be a
pre-requisite needed to create a reference, or attempt to delete
an object which does not exist."""
http_code = 400
app_code = 303
message = "Required object is missing"
class TooManyObjects(RESTError):
"""Too many objects matched specified criteria. Usually this means
more than one object was matched, deleted, or inserted, when only
exactly one should have been subject to the operation."""
http_code = 400
app_code = 304
message = "Too many objects"
class ObjectAlreadyExists(RESTError):
"""An already existing object is on the way of the operation. This
is usually caused by uniqueness constraint violations when creating
new objects."""
http_code = 400
app_code = 305
message = "Object already exists"
class InvalidObject(RESTError):
"The specified object is invalid."
http_code = 400
app_code = 306
message = "Invalid object"
class ExecutionError(RESTError):
"""Input was in principle correct but there was an error processing
the request. This normally means a programming error, a timeout, or
an unusual and unexpected problem with the database. For security reasons
little additional information is returned. If the problem persists, client
should contact service operators. The returned error id can be used as a
reference."""
http_code = 500
app_code = 403
message = "Execution error"
def report_error_header(header, val):
"""If `val` is non-empty, set CherryPy response `header` to `val`.
Replaces all newlines with "; " characters. If the resulting value is
longer than 200 characters, truncates it to the first 197 characters
and leaves a trailing ellipsis "..."."""
if val:
val = val.replace("\n", "; ")
if len(val) > 200: val = val[:197] + "..."
cherrypy.response.headers[header] = val
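# Illustrative sketch (not part of the original module): report_error_header()
# collapses newlines and truncates long values before setting the header.
#
#     report_error_header("X-Error-Detail", "first line\nsecond line")
#     # cherrypy.response.headers["X-Error-Detail"] == "first line; second line"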
def report_rest_error(err, trace, throw):
"""Report a REST error: generate an appropriate log message, set the
response headers and raise an appropriate :class:`~.HTTPError`.
Normally `throw` would be True to translate the exception `err` into
a HTTP server error, but the function can also be called with `throw`
set to False if the purpose is merely to log an exception message.
:arg err: exception object.
:arg trace: stack trace to use in case `err` doesn't have one.
:arg throw: raise a :class:`~.HTTPError` if True."""
if isinstance(err, DatabaseError) and err.errobj:
offset = None
sql, binds, kwbinds = err.lastsql
if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
offset = err.errobj.args[0].offset
sql = sql[:offset] + "<**>" + sql[offset:]
cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
" last statement: %s; binds: %s, %s; offset: %s"
% (err.http_code, err.app_code, err.message,
getattr(err.errobj, "__module__", "__builtins__"),
err.errobj.__class__.__name__,
err.errid, err.instance, newstr(err.errobj).rstrip(),
sql, binds, kwbinds, offset))
for line in err.trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
cherrypy.response.headers["X-Error-ID"] = err.errid
report_error_header("X-Error-Detail", err.message)
report_error_header("X-Error-Info", err.info)
if throw: raise cherrypy.HTTPError(err.http_code, err.message)
elif isinstance(err, RESTError):
if err.errobj:
cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
% (err.__module__, err.__class__.__name__,
err.errid, err.message,
getattr(err.errobj, "__module__", "__builtins__"),
err.errobj.__class__.__name__,
newstr(err.errobj).rstrip()))
trace = err.trace
else:
cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
% (err.__module__, err.__class__.__name__,
err.errid, err.message))
for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
cherrypy.response.headers["X-Error-ID"] = err.errid
report_error_header("X-Error-Detail", err.message)
report_error_header("X-Error-Info", err.info)
if throw: raise cherrypy.HTTPError(err.http_code, err.message)
elif isinstance(err, cherrypy.HTTPError):
errid = "%032x" % random.randrange(1 << 128)
cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
% (err.__module__, err.__class__.__name__,
errid, newstr(err).rstrip()))
for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = newstr(200)
cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
cherrypy.response.headers["X-Error-ID"] = errid
report_error_header("X-Error-Detail", err._message)
if throw: raise err
else:
errid = "%032x" % random.randrange(1 << 128)
cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
% (getattr(err, "__module__", "__builtins__"),
err.__class__.__name__,
errid, newstr(err).rstrip()))
for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = 400
cherrypy.response.headers["X-Error-HTTP"] = 500
cherrypy.response.headers["X-Error-ID"] = errid
report_error_header("X-Error-Detail", "Server error")
if throw: raise cherrypy.HTTPError(500, "Server error")
| /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/REST/Error.py | 0.835752 | 0.247783 | Error.py | pypi |
from builtins import str as newstr, bytes as newbytes
from WMCore.REST.Error import *
import math
import re
import numbers
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3, PY2
def return_message(main_err, custom_err):
if custom_err:
return custom_err
return main_err
def _arglist(argname, kwargs):
val = kwargs.get(argname, None)
if val is None:
return []
elif not isinstance(val, list):
return [ val ]
else:
return val
def _check_rx(argname, val, custom_err = None):
if not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
return re.compile(val)
except Exception:
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
def _check_str(argname, val, rx, custom_err = None):
"""
This does not really check that val is ASCII.
2021-09: we are now using cherrypy version 17.4.0 -> we no longer need to convert
to bytes here, since we are using a recent version of cherrypy.
We merged the functionality of _check_str and _check_ustr into this single function.
:type val: str or bytes (utf8-encoded strings only) in py3, unicode or str in py2
:type rx: regex, compiled from a native str (unicode in py3, bytes in py2)
"""
val = decodeBytesToUnicodeConditional(val, condition=PY3)
val = encodeUnicodeToBytesConditional(val, condition=PY2)
# `val` should now be a "native str" (unicode in py3, bytes in py2)
# here str has not been redefined. it is default `str` in both py2 and py3.
if not isinstance(val, str) or not rx.match(val):
raise InvalidParameter(return_message("Incorrect '%s' parameter %s %s" % (argname, type(val), val), custom_err))
return val
def _check_num(argname, val, bare, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Integral) and (not isinstance(val, (newstr, newbytes)) or (bare and not val.isdigit())):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = int(val)
if (minval is not None and n < minval) or (maxval is not None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except Exception:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _check_real(argname, val, special, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Number) and not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = float(val)
if not special and (math.isnan(n) or math.isinf(n)):
raise InvalidParameter(return_message("Parameter '%s' improper value" % argname, custom_err))
if (minval is not None and n < minval) or (maxval is not None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except Exception:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _validate_one(argname, param, safe, checker, optional, *args):
val = param.kwargs.get(argname, None)
if optional and val is None:
safe.kwargs[argname] = None
else:
safe.kwargs[argname] = checker(argname, val, *args)
del param.kwargs[argname]
def _validate_all(argname, param, safe, checker, *args):
safe.kwargs[argname] = [checker(argname, v, *args) for v in _arglist(argname, param.kwargs)]
if argname in param.kwargs:
del param.kwargs[argname]
def validate_rx(argname, param, safe, optional = False, custom_err = None):
"""Validates that an argument is a valid regexp.
Checks that an argument named `argname` exists in `param.kwargs`,
and is a string which compiles into a Python regular expression.
If successful, the regexp object (not the string) is copied into
`safe.kwargs` and the string value is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_rx, optional, custom_err)
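# Illustrative sketch (not part of the original module): typical use of
# validate_rx() inside a REST entity's validate() hook; the argument name
# "name_filter" and the validate() signature shown are hypothetical examples.
#
#     def validate(self, apiobj, method, api, param, safe):
#         validate_rx("name_filter", param, safe, optional=True)
#         # safe.kwargs["name_filter"] is now a compiled regexp object, or None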
def validate_str(argname, param, safe, rx, optional = False, custom_err = None):
"""Validates that an argument is a string and matches a regexp.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string which matches regular expression `rx`. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
Accepts both unicode strings and utf8-encoded bytes strings as argument
string.
Accepts regex compiled only with "native strings", which means str in both
py2 and py3 (unicode in py3, bytes of utf8-encoded strings in py2)
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
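# Illustrative sketch (not part of the original module): validate_str() takes a
# regexp compiled from a native str; RX_NAME and "campaign" are hypothetical.
#
#     RX_NAME = re.compile(r"^[a-zA-Z0-9_-]{1,64}$")
#     validate_str("campaign", param, safe, RX_NAME)
#     # the matched string is copied into safe.kwargs["campaign"]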
def validate_ustr(argname, param, safe, rx, optional = False, custom_err = None):
"""Validates that an argument is a string and matches a regexp,
During the py2->py3 modernization, _check_str and _check_ustr have been
merged into a single function called _check_str.
This function is now the same as validate_str, but is kept nonetheless
not to break our client's code.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string which matches regular expression `rx`. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_num(argname, param, safe, optional = False,
bare = False, minval = None, maxval = None, custom_err = None):
"""Validates that an argument is a valid integer number.
Checks that an argument named `argname` exists in `param.kwargs`,
and it is an int or a string convertible to a valid number. If successful
the integer value (not the string) is copied into `safe.kwargs`
and the original int/string value is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception.
If `bare` is True, the number is required to be a pure digit sequence if it is a string.
Otherwise anything accepted by `int(val)` is accepted, including for
example leading white space or sign. Note that either way arbitrarily
large values are accepted; if you want to prevent abuse against big
integers, use the `minval` and `maxval` thresholds described below,
or check the length of the string against some limit first.
If `minval` or `maxval` are given, values less than or greater than,
respectively, the threshold are rejected."""
_validate_one(argname, param, safe, _check_num, optional, bare, minval, maxval, custom_err)
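# Illustrative sketch (not part of the original module): bounding an integer
# argument with validate_num(); the argument name "limit" is hypothetical.
#
#     validate_num("limit", param, safe, optional=True,
#                  bare=True, minval=1, maxval=1000)
#     # safe.kwargs["limit"] is an int in [1, 1000], or None if omitted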
def validate_real(argname, param, safe, optional = False,
special = False, minval = None, maxval = None, custom_err = None):
"""Validates that an argument is a valid real number.
Checks that an argument named `argname` exists in `param.kwargs`,
and it is a float or a string convertible to a valid number. If successful
the float value (not the string) is copied into `safe.kwargs`
and the original float/string value is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception.
Anything accepted by `float(val)` is accepted, including for example
leading white space, sign and exponent. However NaN and +/- Inf are
rejected unless `special` is True.
If `minval` or `maxval` are given, values less than or greater than,
respectively, the threshold are rejected."""
_validate_one(argname, param, safe, _check_real, optional, special, minval, maxval, custom_err)
def validate_rxlist(argname, param, safe, custom_err = None):
"""Validates that an argument is an array of strings, each of which
can be compiled into a python regexp object.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which compiles into a regular expression.
If successful the array is copied into `safe.kwargs` and the value is
removed from `param.kwargs`. The value always becomes an array in
`safe.kwargs`, even if no or only one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_rx, custom_err)
def validate_strlist(argname, param, safe, rx, custom_err = None):
"""Validates that an argument is an array of strings, each of which
matches a regexp.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which matches the regular expression
`rx`. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Use `validate_ustrlist` instead if the argument string might need
to be converted from utf-8 into unicode first. Use this method only
for inputs which are meant to be bare strings.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_ustrlist(argname, param, safe, rx, custom_err = None):
"""Validates that an argument is an array of strings, each of which
matches a regexp once converted from utf-8 into unicode.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which matches the regular expression
`rx`. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Use `validate_strlist` instead if the argument strings should always
be bare strings. This one automatically converts everything into
unicode and expects input exclusively in utf-8, which may not be
appropriate constraints for some uses.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
# _check_ustr was merged into _check_str (see validate_ustr above), so use _check_str here
_validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_numlist(argname, param, safe, bare=False, minval=None, maxval=None, custom_err = None):
"""Validates that an argument is an array of integers, as checked by
`validate_num()`.
Checks that an argument named `argname` is either a single string/int or
an array of strings/int, each of which validates with `validate_num` and
`bare`, `minval` and `maxval` arguments. If successful the array is
copied into `safe.kwargs` and the value is removed from `param.kwargs`.
The value always becomes an array in `safe.kwargs`, even if no or only one
argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_num, bare, minval, maxval, custom_err)
def validate_reallist(argname, param, safe, special=False, minval=None, maxval=None, custom_err = None):
"""Validates that an argument is an array of integers, as checked by
`validate_real()`.
Checks that an argument named `argname` is either a single string/float or
an array of strings/floats, each of which validates with `validate_real` and
`special`, `minval` and `maxval` arguments. If successful the array is
copied into `safe.kwargs` and the value is removed from `param.kwargs`.
The value always becomes an array in `safe.kwargs`, even if no or only
one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_real, special, minval, maxval, custom_err)
def validate_no_more_input(param):
"""Verifies no more input is left in `param.args` or `param.kwargs`."""
if param.args:
raise InvalidParameter("Excess path arguments, not validated args='%s'" % param.args)
if param.kwargs:
raise InvalidParameter("Excess keyword arguments, not validated kwargs='%s'" % param.kwargs)
def validate_lengths(safe, *names):
"""Verifies that all `names` exist in `safe.kwargs`, are lists, and
all the lists have the same length. This is convenience function for
checking that an API accepting multiple values receives equal number
of values for all of its parameters."""
refname = names[0]
if refname not in safe.kwargs or not isinstance(safe.kwargs[refname], list):
raise InvalidParameter("Incorrect '%s' parameter" % refname)
reflen = len(safe.kwargs[refname])
for other in names[1:]:
if other not in safe.kwargs or not isinstance(safe.kwargs[other], list):
raise InvalidParameter("Incorrect '%s' parameter" % other)
elif len(safe.kwargs[other]) != reflen:
raise InvalidParameter("Mismatched number of arguments: %d %s vs. %d %s"
% (reflen, refname, len(safe.kwargs[other]), other))
| /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/REST/Validation.py | 0.600774 | 0.233335 | Validation.py | pypi |
from __future__ import print_function
import gzip
from builtins import str, bytes, object
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytes, encodeUnicodeToBytesConditional
from future.utils import viewitems
import hashlib
import json
import xml.sax.saxutils
import zlib
from traceback import format_exc
import cherrypy
from WMCore.REST.Error import RESTError, ExecutionError, report_rest_error
try:
from cherrypy.lib import httputil
except ImportError:
from cherrypy.lib import http as httputil
def vary_by(header):
"""Add 'Vary' header for `header`."""
varies = cherrypy.response.headers.get('Vary', '')
varies = [x.strip() for x in varies.split(",") if x.strip()]
if header not in varies:
varies.append(header)
cherrypy.response.headers['Vary'] = ", ".join(varies)
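# Illustrative sketch (not part of the original module): vary_by() appends a
# header name to the response 'Vary' header without duplicating entries,
# assuming no 'Vary' header was set beforehand.
#
#     vary_by("Accept")
#     vary_by("Accept-Encoding")
#     # cherrypy.response.headers["Vary"] == "Accept, Accept-Encoding"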
def is_iterable(obj):
"""Check if `obj` is iterable."""
try:
iter(obj)
except TypeError:
return False
else:
return True
class RESTFormat(object):
def __call__(self, stream, etag):
"""Main entry point for generating output for `stream` using `etag`
object to generate ETag header. Returns a generator function for
producing a verbatim copy of the `stream` items, including any preambles
and trailers needed for the selected format. The intention is that
the caller will use the iterable to generate chunked HTTP transfer
encoding, or a simple result such as an image."""
# Make 'stream' iterable. We convert everything to chunks here.
# The final stream consumer will collapse small responses back
# to a single string. Convert files to 1MB chunks.
if stream is None:
stream = ['']
elif isinstance(stream, (str, bytes)):
stream = [stream]
elif hasattr(stream, "read"):
# types.FileType is not available anymore in python3,
# using it raises pylint W1624.
# Since cherrypy.lib.file_generator only uses the .read() attribute
# of a file, we simply check if stream.read() is present instead.
# https://github.com/cherrypy/cherrypy/blob/2a8aaccd649eb1011382c39f5cd93f76f980c0b1/cherrypy/lib/__init__.py#L64
stream = cherrypy.lib.file_generator(stream, 512 * 1024)
return self.stream_chunked(stream, etag, *self.chunk_args(stream))
def chunk_args(self, stream):
"""Return extra arguments needed for `stream_chunked()`. The default
returns an empty tuple, so no extra arguments. Override in the derived
class if `stream_chunked()` needs preamble or trailer arguments."""
return tuple()
class XMLFormat(RESTFormat):
"""Format an iterable of objects into XML encoded in UTF-8.
Generates normally first a preamble, a stream of XML-rendered objects,
then the trailer, computing an ETag on the output string in the process.
This is designed exclusively for use with iterables for chunked transfer
encoding HTTP responses; it's not a general purpose formatting utility.
Outputs first a preamble, then XML encoded output of input stream, and
finally a trailer. Any exceptions raised by input stream are reported to
`report_rest_error` and swallowed, as this is normally used to generate
output for CherryPy responses, which cannot handle exceptions reasonably
after the output generation begins; later processing may reconvert those
back to exceptions however (cf. stream_maybe_etag()). Once the preamble
has been emitted, the trailer is also emitted even if the input stream
raises an exception, in order to make the output well-formed; the client
must inspect the X-REST-Status trailer header to find out if it got the
complete output. No ETag header is generated in case of an exception.
The ETag generation is deterministic only if iterating over input is
deterministic. Beware in particular the key order for a dict is
arbitrary and may differ for two semantically identical dicts.
A X-REST-Status trailer header is added only in case of error. There is
normally 'X-REST-Status: 100' in normal response headers, and it remains
valid in case of success.
The output is generated as an XML document whose top-level entity name
is defined by the label given at the formatter construction time. The
caller must define ``cherrypy.request.rest_generate_data`` to element
name for wrapping stream contents. Usually the top-level entity is the
application name and the ``cherrypy.request.rest_generate_data`` is
``result``.
Iterables are output as ``<array><i>ITEM</i><i>ITEM</i></array>``,
dictionaries as ``<dict><key>KEY</key><value>VALUE</value></dict>``.
`None` is output as empty contents, and hence there is no way to
distinguish `None` and an empty string from each other. Scalar types
are output as rendered by `str()`, but obviously XML encoding unsafe
characters. This class does not support formatting arbitrary types.
The formatter does not insert any spaces into the output. Although the
output is generated as a preamble, stream of objects, and trailer just
like by the `JSONFormatter`, each of which is a separate HTTP transfer
chunk, the output does *not* have guaranteed line-oriented structure
like the `JSONFormatter` produces. Note in particular that if the data
stream contains strings with newlines, the output will have arbitrary
line structure. On the other hand, as the output is well-formed XML,
virtually all SAX processors can read the stream incrementally even if
the client isn't able to fully preserve chunked HTTP transfer encoding."""
def __init__(self, label):
self.label = label
@staticmethod
def format_obj(obj):
"""Render an object `obj` into XML."""
if isinstance(obj, type(None)):
result = ""
elif isinstance(obj, str):
result = xml.sax.saxutils.escape(obj).encode("utf-8")
elif isinstance(obj, bytes):
result = xml.sax.saxutils.escape(obj)
elif isinstance(obj, (int, float, bool)):
result = xml.sax.saxutils.escape(str(obj)).encode("utf-8")
elif isinstance(obj, dict):
result = "<dict>"
for k, v in viewitems(obj):
result += "<key>%s</key><value>%s</value>" % \
(xml.sax.saxutils.escape(k).encode("utf-8"),
XMLFormat.format_obj(v))
result += "</dict>"
elif is_iterable(obj):
result = "<array>"
for v in obj:
result += "<i>%s</i>" % XMLFormat.format_obj(v)
result += "</array>"
else:
cherrypy.log("cannot represent object of type %s in xml (%s)"
% (type(obj).__name__, repr(obj)))
raise ExecutionError("cannot represent object in xml")
return result
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
try:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = XMLFormat.format_obj(obj)
etag.update(chunk)
yield chunk
except GeneratorExit:
etag.invalidate()
trailer = None
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
"""Return header and trailer needed to wrap `stream` as XML reply."""
preamble = "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
preamble += "<%s>" % self.label
if cherrypy.request.rest_generate_preamble:
desc = self.format_obj(cherrypy.request.rest_generate_preamble)
preamble += "<desc>%s</desc>" % desc
preamble += "<%s>" % cherrypy.request.rest_generate_data
trailer = "</%s></%s>" % (cherrypy.request.rest_generate_data, self.label)
return preamble, trailer
class JSONFormat(RESTFormat):
"""Format an iterable of objects into JSON.
Generates normally first a preamble, a stream of JSON-rendered objects,
then the trailer, computing an ETag on the output string in the process.
This is designed exclusively for use with iterables for chunked transfer
encoding HTTP responses; it's not a general purpose formatting utility.
Outputs first a preamble, then JSON encoded output of input stream, and
finally a trailer. Any exceptions raised by input stream are reported to
`report_rest_error` and swallowed, as this is normally used to generate
output for CherryPy responses, which cannot handle exceptions reasonably
after the output generation begins; later processing may reconvert those
back to exceptions however (cf. stream_maybe_etag()). Once the preamble
has been emitted, the trailer is also emitted even if the input stream
raises an exception, in order to make the output well-formed; the client
must inspect the X-REST-Status trailer header to find out if it got the
complete output. No ETag header is generated in case of an exception.
The ETag generation is deterministic only if `json.dumps()` output is
deterministic for the input. Beware in particular the key order for a
dict is arbitrary and may differ for two semantically identical dicts.
A X-REST-Status trailer header is added only in case of error. There is
normally 'X-REST-Status: 100' in normal response headers, and it remains
valid in case of success.
The output is always generated as a JSON dictionary. The caller must
define ``cherrypy.request.rest_generate_data`` as the key for actual
contents, usually something like "result". The `stream` value will be
generated as an array value for that key.
If ``cherrypy.request.rest_generate_preamble`` is a non-empty list, it
is output as the ``desc`` key value in the preamble before outputting
the `stream` contents. Otherwise the output consists solely of `stream`.
A common use of ``rest_generate_preamble`` is list of column labels
with `stream` an iterable of lists of column values.
The output is guaranteed to contain one line of preamble which starts a
dictionary and an array ("``{key: [``"), one line of JSON rendering of
each object in `stream`, with the first line starting with exactly one
space and second and subsequent lines starting with a comma, and one
final trailer line consisting of "``]}``". Each line is generated as a
HTTP transfer chunk. This format is fixed so readers can be constructed
to read and parse the stream incrementally one line at a time,
facilitating maximum throughput processing of the response."""
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
comma = " "
try:
if preamble:
etag.update(preamble)
yield preamble
obj = None
try:
for obj in stream:
chunk = comma + json.dumps(obj) + "\n"
etag.update(chunk)
yield chunk
comma = ","
except cherrypy.HTTPError:
raise
except GeneratorExit:
etag.invalidate()
trailer = None
raise
except Exception as exp:
print("ERROR, json.dumps failed to serialize %s, type %s\nException: %s" \
% (obj, type(obj), str(exp)))
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
cherrypy.response.headers["X-REST-Status"] = 100
except cherrypy.HTTPError:
raise
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
"""Return header and trailer needed to wrap `stream` as JSON reply."""
comma = ""
preamble = "{"
trailer = "]}\n"
if cherrypy.request.rest_generate_preamble:
desc = json.dumps(cherrypy.request.rest_generate_preamble)
preamble += '"desc": %s' % desc
comma = ", "
preamble += '%s"%s": [\n' % (comma, cherrypy.request.rest_generate_data)
return preamble, trailer
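# Illustrative sketch (not part of the original module): with
# cherrypy.request.rest_generate_data set to "result" and no preamble list,
# JSONFormat emits chunks that concatenate to the line-oriented form below
# (first data line starts with a space, subsequent ones with a comma):
#
#     {"result": [
#      {"a": 1}
#     ,{"b": 2}
#     ]}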
class PrettyJSONFormat(JSONFormat):
""" Format used for human, (web browser)"""
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
comma = " "
try:
if preamble:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = comma + json.dumps(obj, indent=2)
etag.update(chunk)
yield chunk
comma = ","
except GeneratorExit:
etag.invalidate()
trailer = None
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
cherrypy.response.headers["X-REST-Status"] = 100
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
class PrettyJSONHTMLFormat(PrettyJSONFormat):
""" Format used for human, (web browser) wrap around html tag on json"""
@staticmethod
def format_obj(obj):
"""Render an object `obj` into HTML."""
if isinstance(obj, type(None)):
result = ""
elif isinstance(obj, str):
obj = xml.sax.saxutils.quoteattr(obj)
result = "<pre>%s</pre>" % obj if '\n' in obj else obj
elif isinstance(obj, bytes):
obj = xml.sax.saxutils.quoteattr(str(obj, "utf-8"))
result = "<pre>%s</pre>" % obj if '\n' in obj else obj
elif isinstance(obj, (int, float, bool)):
result = "%s" % obj
elif isinstance(obj, dict):
result = "<ul>"
for k, v in viewitems(obj):
result += "<li><b>%s</b>: %s</li>" % (k, PrettyJSONHTMLFormat.format_obj(v))
result += "</ul>"
elif is_iterable(obj):
empty = True
result = "<details open><ul>"
for v in obj:
empty = False
result += "<li>%s</li>" % PrettyJSONHTMLFormat.format_obj(v)
result += "</ul></details>"
if empty:
result = ""
else:
cherrypy.log("cannot represent object of type %s in xml (%s)"
% (type(obj).__class__.__name__, repr(obj)))
raise ExecutionError("cannot represent object in xml")
return result
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
try:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = PrettyJSONHTMLFormat.format_obj(obj)
etag.update(chunk)
yield chunk
except GeneratorExit:
etag.invalidate()
trailer = None
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
"""Return header and trailer needed to wrap `stream` as XML reply."""
preamble = "<html><body>"
trailer = "</body></html>"
return preamble, trailer
class RawFormat(RESTFormat):
"""Format an iterable of objects as raw data.
Generates raw data completely unmodified, for example image data or
streaming arbitrary external data files including even plain text.
Computes an ETag on the output in the process. The result is always
chunked, even simple strings on input. Usually small enough responses
will automatically be converted back to a single string response post
compression and ETag processing.
Any exceptions raised by input stream are reported to `report_rest_error`
and swallowed, as this is normally used to generate output for CherryPy
responses, which cannot handle exceptions reasonably after the output
generation begins; later processing may reconvert those back to exceptions
however (cf. stream_maybe_etag()). A X-REST-Status trailer header is added
if (and only if) an exception occurs; the client must inspect that to find
out if it got the complete output. There is normally 'X-REST-Status: 100'
in normal response headers, and it remains valid in case of success.
No ETag header is generated in case of an exception."""
def stream_chunked(self, stream, etag):
"""Generator for actually producing the output."""
try:
for chunk in stream:
etag.update(chunk)
yield chunk
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
except BaseException:
etag.invalidate()
raise
class DigestETag(object):
"""Compute hash digest over contents for ETag header."""
algorithm = None
def __init__(self, algorithm=None):
"""Prepare ETag computer."""
self.digest = hashlib.new(algorithm or self.algorithm)
def update(self, val):
"""Process response data `val`."""
if self.digest:
self.digest.update(encodeUnicodeToBytes(val))
def value(self):
"""Return ETag header value for current input."""
return self.digest and '"%s"' % self.digest.hexdigest()
def invalidate(self):
"""Invalidate the ETag calculator so value() will return None."""
self.digest = None
class MD5ETag(DigestETag):
"""Compute MD5 hash over contents for ETag header."""
algorithm = 'md5'
class SHA1ETag(DigestETag):
"""Compute SHA1 hash over contents for ETag header."""
algorithm = 'sha1'
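# Illustrative sketch (not part of the original module): the ETag helpers hash
# every chunk written out and produce a quoted hex digest for the ETag header.
#
#     etag = SHA1ETag()
#     etag.update("chunk one")
#     etag.update("chunk two")
#     etag.value()       # '"<40-character sha1 hex digest>"'
#     etag.invalidate()
#     etag.value()       # None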
def _stream_compress_identity(reply, *args):
"""Streaming compressor which returns original data unchanged."""
return reply
def _stream_compress_deflate(reply, compress_level, max_chunk):
"""Streaming compressor for the 'deflate' method. Generates output that
is guaranteed to expand at the exact same chunk boundaries as original
reply stream."""
# Create zlib compression object, with raw data stream (negative window size)
z = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
# Data pending compression. We only take entire chunks from original
# reply. Then process reply one chunk at a time. Whenever we have enough
# data to compress, spit it out flushing the zlib engine entirely, so we
# respect original chunk boundaries.
npending = 0
pending = []
for chunk in reply:
pending.append(chunk)
npending += len(chunk)
if npending >= max_chunk:
part = z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FULL_FLUSH)
pending = []
npending = 0
yield part
# Crank the compressor one more time for remaining output.
if npending:
yield z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FINISH)
def _stream_compress_gzip(reply, compress_level, *args):
"""Streaming compressor for the 'gzip' method. Generates output that
is guaranteed to expand at the exact same chunk boundaries as original
reply stream."""
data = []
for chunk in reply:
data.append(chunk)
if data:
yield gzip.compress(encodeUnicodeToBytes("".join(data)), compress_level)
#: Stream compression methods.
_stream_compressor = {
'identity': _stream_compress_identity,
'deflate': _stream_compress_deflate,
'gzip': _stream_compress_gzip
}
def stream_compress(reply, available, compress_level, max_chunk):
"""If compression has been requested via Accept-Encoding request header,
and is granted for this response via `available` compression methods,
convert the streaming `reply` into another streaming response which is
compressed at the exact chunk boundaries of the original response,
except that individual chunks may be coalesced up to `max_chunk` size.
The `compression_level` tells how hard to compress, zero disables the
compression entirely."""
global _stream_compressor
for enc in cherrypy.request.headers.elements('Accept-Encoding'):
if enc.value not in available:
continue
elif enc.value in _stream_compressor and compress_level > 0:
# Add 'Vary' header for 'Accept-Encoding'.
vary_by('Accept-Encoding')
# Compress contents at original chunk boundaries.
if 'Content-Length' in cherrypy.response.headers:
del cherrypy.response.headers['Content-Length']
cherrypy.response.headers['Content-Encoding'] = enc.value
return _stream_compressor[enc.value](reply, compress_level, max_chunk)
return reply
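# Illustrative sketch (not part of the original module): a response generator
# would typically be wrapped as below before ETag handling; the compression
# level 9 and the 100000-byte chunk-coalescing limit are hypothetical values.
#
#     reply = stream_compress(reply, ("gzip", "deflate", "identity"), 9, 100000)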
def _etag_match(status, etagval, match, nomatch):
"""Match ETag value against any If-Match / If-None-Match headers."""
# Execute conditions only for status 2xx. We only handle GET/HEAD
# requests here, it makes no sense to try to do this for PUT etc.
# as they need to be handled as request pre-condition, not in the
# streaming out part here.
if cherrypy.request.method in ('GET', 'HEAD'):
status, dummyReason, dummyMsg = httputil.valid_status(status)
if status >= 200 and status <= 299:
if match and ("*" in match or etagval in match):
raise cherrypy.HTTPError(412, "Precondition on ETag %s failed" % etagval)
if nomatch and ("*" in nomatch or etagval in nomatch):
raise cherrypy.HTTPRedirect([], 304)
def _etag_tail(head, tail, etag):
"""Generator which first returns anything in `head`, then `tail`.
Sets ETag header at the end to value of `etag` if it's defined and
yields a value."""
for chunk in head:
yield encodeUnicodeToBytes(chunk)
for chunk in tail:
yield encodeUnicodeToBytes(chunk)
etagval = (etag and etag.value())
if etagval:
cherrypy.response.headers["ETag"] = etagval
def stream_maybe_etag(size_limit, etag, reply):
"""Maybe generate ETag header for the response, and handle If-Match
and If-None-Match request headers. Consumes the reply until at most
`size_limit` bytes. If the response fits into that size, adds the
ETag header and matches it against any If-Match / If-None-Match
request headers and replies appropriately.
If the response is fully buffered, and the `reply` generator actually
results in an error and sets X-Error-HTTP / X-Error-Detail headers,
converts that error back into a real HTTP error response. Otherwise
responds with the fully buffered body directly, without generator
and chunking. In other words, responses smaller than `size_limit`
are always fully buffered and replied immediately without chunking.
If the response is not fully buffered, it's guaranteed to be output
at original chunk boundaries.
Note that if this function is fed the output from `stream_compress()`
as it normally would be, the `size_limit` constrains the compressed
size, and chunk boundaries correspond to compressed chunks."""
req = cherrypy.request
res = cherrypy.response
match = [str(x) for x in (req.headers.elements('If-Match') or [])]
nomatch = [str(x) for x in (req.headers.elements('If-None-Match') or [])]
# If ETag is already set, match conditions and output without buffering.
etagval = res.headers.get('ETag', None)
if etagval:
_etag_match(res.status or 200, etagval, match, nomatch)
res.headers['Trailer'] = 'X-REST-Status'
return _etag_tail([], reply, None)
# Buffer up to size_limit bytes internally. This incrementally builds up the
# ETag value inside 'etag'. In case of exceptions the ETag invalidates.
# If we exceed the limit, fall back to streaming without checking ETag
# against If-Match/If-None-Match. We'll still set the ETag in the trailer
# headers, so clients which understand trailers will get the value; most
# clients including browsers will ignore them.
size = 0
result = []
for chunk in reply:
result.append(chunk)
size += len(chunk)
if size > size_limit:
res.headers['Trailer'] = 'X-REST-Status'
return _etag_tail(result, reply, etag)
# We've buffered the entire response, but it may be an error reply. The
# generator code does not know if it's allowed to raise exceptions, so
# it swallows all errors and converts them into X-* headers. We recover
# the original HTTP response code and message from X-Error-{HTTP,Detail}
# headers, if any are present.
err = res.headers.get('X-Error-HTTP', None)
if err:
message = res.headers.get('X-Error-Detail', 'Original error lost')
raise cherrypy.HTTPError(int(err), message)
# OK, we buffered the entire reply and it's ok. Check ETag match criteria.
# The original stream generator must guarantee that if it fails it resets
# the 'etag' value, even if the error handlers above didn't run.
etagval = etag.value()
if etagval:
res.headers['ETag'] = etagval
_etag_match(res.status or 200, etagval, match, nomatch)
# OK, respond with the buffered reply as a plain string.
res.headers['Content-Length'] = size
# TODO investigate why `result` is a list of bytes strings in py3
# The current solution seems to work in both py2 and py3
resp = b"" if PY3 else ""
for item in result:
resp += encodeUnicodeToBytesConditional(item, condition=PY3)
assert len(resp) == size
return resp
| /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/REST/Format.py | 0.843605 | 0.218909 | Format.py | pypi |
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method = 'HNLogin',
login='testuser', name='Test User',
dn="/test/dn", roles={}, format="list"):
"""Create fake authentication and authorisation headers compatible
with the CMSWEB front-ends. Assumes you have the HMAC signing key
the back-end will use to validate the headers.
:arg str hmac_key: binary key data for signing headers.
:arg str method: authentication method, one of X509Cert, X509Proxy,
HNLogin, HostIP, AUCookie or None.
:arg str login: account login name.
:arg str name: account user name.
:arg str dn: account X509 subject.
:arg dict roles: role dictionary, each role with 'site' and 'group' lists.
:returns: list of header name, value tuples to add to a HTTP request."""
headers = { 'cms-auth-status': 'OK', 'cms-authn-method': method }
if login:
headers['cms-authn-login'] = login
if name:
headers['cms-authn-name'] = name
if dn:
headers['cms-authn-dn'] = dn
for name, role in viewitems(roles):
name = 'cms-authz-' + authz_canonical(name)
headers[name] = []
for r in 'site', 'group':
if r in role:
headers[name].extend(["%s:%s" % (r, authz_canonical(v)) for v in role[r]])
headers[name] = " ".join(headers[name])
prefix = suffix = ""
hkeys = list(headers)
for hk in sorted(hkeys):
if hk != 'cms-auth-status':
prefix += "h%xv%x" % (len(hk), len(headers[hk]))
suffix += "%s%s" % (hk, headers[hk])
msg = prefix + "#" + suffix
if PY3:
hmac_key = encodeUnicodeToBytes(hmac_key)
msg = encodeUnicodeToBytes(msg)
cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
headers['cms-authn-hmac'] = cksum
if format == "list":
return listitems(headers)
else:
return headers
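# Illustrative sketch (not part of the original module): generating fake
# front-end headers for a test request; the signing key comes from
# fake_authz_key_file() defined below and the role/group names are hypothetical.
#
#     keyfile = fake_authz_key_file()
#     headers = fake_authz_headers(keyfile.data,
#                                  roles={"admin": {"group": ["reqmgr"]}})
#     # 'headers' is a list of (name, value) tuples to attach to the HTTP request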
def fake_authz_key_file(delete=True):
"""Create temporary file for fake authorisation hmac signing key.
:returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
attribute contains the HMAC signing binary key."""
t = NamedTemporaryFile(delete=delete)
with open("/dev/urandom", "rb") as fd:
t.data = fd.read(20)
t.write(t.data)
t.seek(0)
return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
"""Helper function to set up a :class:`~.RESTMain` server from given
module and class. Creates a fake server configuration and instantiates
the server application from it.
:arg str module_name: module from which to import test class.
:arg str class_type: name of the server test class.
:arg str app_name: optional test application name, 'test' by default.
:returns: tuple with the server object and authz hmac signing key."""
if authz_key_file:
test_authz_key = authz_key_file
else:
test_authz_key = fake_authz_key_file()
cfg = Configuration()
main = cfg.section_('main')
main.application = app_name or 'test'
main.silent = True
main.index = 'top'
main.authz_defaults = { 'role': None, 'group': None, 'site': None }
main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
app = cfg.section_(app_name or 'test')
app.admin = 'dada@example.org'
app.description = app.title = 'Test'
views = cfg.section_('views')
top = views.section_('top')
top.object = module_name + "." + class_name
server = RESTMain(cfg, os.getcwd())
server.validate_config()
server.setup_server()
server.install_application()
cherrypy.config.update({'server.socket_port': port})
cherrypy.config.update({'server.socket_host': '127.0.0.1'})
cherrypy.config.update({'request.show_tracebacks': True})
cherrypy.config.update({'environment': 'test_suite'})
for app in viewvalues(cherrypy.tree.apps):
if '/' in app.config:
app.config["/"]["request.show_tracebacks"] = True
return server, test_authz_key
| /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/REST/Test.py | 0.631935 | 0.193147 | Test.py | pypi |
from __future__ import division, print_function, absolute_import
from future import standard_library
standard_library.install_aliases()
# system modules
import json
import logging
import math
import re
import time
from urllib.parse import quote, unquote
# WMCore modules
from Utils.IteratorTools import grouper
from Utils.CertTools import ckey, cert
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
# DBS aggregators
from dbs.apis.dbsClient import aggRuns, aggFileLumis
# static variables
STEP_PAT = re.compile(r'Step[0-9]')
TASK_PAT = re.compile(r'Task[0-9]')
def hasHTTPFailed(row):
"""
Evaluates whether the HTTP request through PyCurl failed or not.
:param row: dictionary data returned from pycurl_manager module
:return: a boolean confirming failure or not
"""
if 'data' not in row:
return True
if int(row.get('code', 200)) == 200:
return False
return True
def getMSLogger(verbose, logger=None):
"""
_getMSLogger_
Return a logger object using the standard WMCore formatter
:param verbose: boolean flag to enable debug logging
:param logger: optional pre-configured logger object; returned as-is if provided
:return: a logger object
"""
if logger:
return logger
verbose = logging.DEBUG if verbose else logging.INFO
logger = logging.getLogger()
logging.basicConfig(format="%(asctime)s:%(levelname)s:%(module)s: %(message)s",
level=verbose)
return logger
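# Illustrative sketch (not part of the original module): callers usually create
# the logger once and pass it around, e.g.
#
#     logger = getMSLogger(verbose=False)
#     logger.info("MS service cycle starting")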
def isRelVal(reqDict):
"""
Helper function to evaluate whether the workflow is RelVal or not.
:param reqDict: dictionary with the workflow description
:return: True if it's a RelVal workflow, otherwise False
"""
return reqDict.get("SubRequestType", "") in ['RelVal', 'HIRelVal']
def dbsInfo(datasets, dbsUrl):
"Provides DBS info about dataset blocks"
datasetBlocks = {}
datasetSizes = {}
datasetTransfers = {}
if not datasets:
return datasetBlocks, datasetSizes, datasetTransfers
urls = ['%s/blocks?detail=True&dataset=%s' % (dbsUrl, d) for d in datasets]
logging.info("Executing %d requests against DBS 'blocks' API, with details", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if hasHTTPFailed(row):
print("FAILURE: dbsInfo for %s. Error: %s %s" % (dataset, row.get('code'), row.get('error')))
continue
rows = json.loads(row['data'])
blocks = []
size = 0
datasetTransfers.setdefault(dataset, {}) # flat dict in the format of blockName: blockSize
for item in rows:
blocks.append(item['block_name'])
size += item['block_size']
datasetTransfers[dataset].update({item['block_name']: item['block_size']})
datasetBlocks[dataset] = blocks
datasetSizes[dataset] = size
return datasetBlocks, datasetSizes, datasetTransfers
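# Illustrative sketch (not part of the original module): dbsInfo() returns three
# dictionaries keyed by dataset name; the dataset and DBS URL below are
# hypothetical placeholders.
#
#     blocks, sizes, transfers = dbsInfo(
#         ["/Primary/Processed-v1/AOD"],
#         "https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
#     # blocks["/Primary/Processed-v1/AOD"]    -> list of block names
#     # sizes["/Primary/Processed-v1/AOD"]     -> total size in bytes
#     # transfers["/Primary/Processed-v1/AOD"] -> {block_name: block_size}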
def getPileupDocs(mspileupUrl, queryDict):
"""
Fetch documents from MSPileup according to the query passed in.
:param mspileupUrl: string with the MSPileup url
:param queryDict: dictionary with the MongoDB query to run
:return: returns a list with all the pileup objects, or raises
an exception in case of failure
"""
mgr = RequestHandler()
headers = {'Content-Type': 'application/json'}
data = mgr.getdata(mspileupUrl, queryDict, headers, verb='POST',
ckey=ckey(), cert=cert(), encode=True, decode=True)
if data and data.get("result", []):
if "error" in data["result"][0]:
msg = f"Failed to retrieve MSPileup documents with query: {queryDict}"
msg += f" and error message: {data}"
raise RuntimeError(msg)
return data["result"]
def getPileupDatasetSizes(datasets, phedexUrl):
"""
Given a list of datasets, find all their blocks with replicas
available, i.e., blocks that have valid files to be processed,
and calculate the total dataset size
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:return: a dictionary of datasets and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
sizeByDset = {}
if not datasets:
return sizeByDset
urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if row['data'] is None:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
sizeByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
sizeByDset.setdefault(dataset, 0)
try:
for item in rows['phedex']['block']:
sizeByDset[dataset] += item['bytes']
except Exception as exc:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
sizeByDset[dataset] = None
return sizeByDset
def getBlockReplicasAndSize(datasets, phedexUrl, group=None):
"""
Given a list of datasets, find all their blocks with replicas
available (thus blocks with at least 1 valid file), completed
and subscribed.
If PhEDEx group is provided, make sure it's subscribed under that
same group.
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:param group: optional PhEDEx group name
:return: a dictionary in the form of:
{"dataset":
{"block":
{"blockSize": 111, "locations": ["x", "y"]}
}
}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
dsetBlockSize = {}
if not datasets:
return dsetBlockSize
urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if row['data'] is None:
print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
dsetBlockSize.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
dsetBlockSize.setdefault(dataset, {})
try:
for item in rows['phedex']['block']:
block = {item['name']: {'blockSize': item['bytes'], 'locations': []}}
for repli in item['replica']:
if repli['complete'] == 'y' and repli['subscribed'] == 'y':
if not group:
block[item['name']]['locations'].append(repli['node'])
elif repli['group'] == group:
block[item['name']]['locations'].append(repli['node'])
dsetBlockSize[dataset].update(block)
except Exception as exc:
print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s" % (dataset, str(exc)))
dsetBlockSize[dataset] = None
return dsetBlockSize
def getPileupSubscriptions(datasets, phedexUrl, group=None, percentMin=99):
"""
Provided a list of datasets, find dataset-level subscriptions that are
at least `percentMin` percent complete.
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:param group: optional string with the PhEDEx group
:param percentMin: only return subscriptions that are at least this percentage complete
:return: a dictionary of datasets and a list of their location.
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
locationByDset = {}
if not datasets:
return locationByDset
if group:
url = "%s/subscriptions?group=%s" % (phedexUrl, group)
url += "&percent_min=%s&dataset=%s"
else:
url = "%s/subscriptions?" % phedexUrl
url += "percent_min=%s&dataset=%s"
urls = [url % (percentMin, dset) for dset in datasets]
logging.info("Executing %d requests against PhEDEx 'subscriptions' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].rsplit('=')[-1]
if row['data'] is None:
print("Failure in getPileupSubscriptions for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
locationByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
locationByDset.setdefault(dataset, [])
try:
for item in rows['phedex']['dataset']:
for subs in item['subscription']:
locationByDset[dataset].append(subs['node'])
except Exception as exc:
print("Failure in getPileupSubscriptions for dataset %s. Error: %s" % (dataset, str(exc)))
locationByDset[dataset] = None
return locationByDset
def getBlocksByDsetAndRun(datasetName, runList, dbsUrl):
"""
Given a dataset name and a list of runs, find all the blocks
:return: flat list of blocks
"""
blocks = set()
if isinstance(runList, set):
runList = list(runList)
urls = []
for runSlice in grouper(runList, 50):
urls.append('%s/blocks?run_num=%s&dataset=%s' % (dbsUrl, str(runSlice).replace(" ", ""), datasetName))
logging.info("Executing %d requests against DBS 'blocks' API, with run_num list", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].rsplit('=')[-1]
if hasHTTPFailed(row):
msg = "Failure in getBlocksByDsetAndRun for %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error'))
raise RuntimeError(msg)
rows = json.loads(row['data'])
for item in rows:
blocks.add(item['block_name'])
return list(blocks)
def getFileLumisInBlock(blocks, dbsUrl, validFileOnly=1):
"""
Given a list of blocks, find their file run lumi information
in DBS for up to 10 blocks concurrently
:param blocks: list of block names
:param dbsUrl: string with the DBS URL
:param validFileOnly: integer flag for valid files only or not
:return: a dict of blocks with list of file/run/lumi info
"""
runLumisByBlock = {}
urls = ['%s/filelumis?validFileOnly=%d&block_name=%s' % (dbsUrl, validFileOnly, quote(b)) for b in blocks]
# limit it to 10 concurrent calls not to overload DBS
logging.info("Executing %d requests against DBS 'filelumis' API, concurrency limited to 10", len(urls))
data = multi_getdata(urls, ckey(), cert(), num_conn=10)
for row in data:
blockName = unquote(row['url'].rsplit('=')[-1])
if hasHTTPFailed(row):
msg = "Failure in getFileLumisInBlock for block %s. Error: %s %s" % (blockName,
row.get('code'),
row.get('error'))
raise RuntimeError(msg)
rows = json.loads(row['data'])
rows = aggFileLumis(rows) # adjust to DBS Go server output
runLumisByBlock.setdefault(blockName, [])
for item in rows:
runLumisByBlock[blockName].append(item)
return runLumisByBlock
def findBlockParents(blocks, dbsUrl):
"""
Helper function to find block parents given a list of block names.
Return a dictionary in the format of:
{"child dataset name": {"child block": ["parent blocks"],
"child block": ["parent blocks"], ...}}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
parentsByBlock = {}
urls = ['%s/blockparents?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
logging.info("Executing %d requests against DBS 'blockparents' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
blockName = unquote(row['url'].rsplit('=')[-1])
dataset = blockName.split("#")[0]
if hasHTTPFailed(row):
print("Failure in findBlockParents for block %s. Error: %s %s" % (blockName,
row.get('code'),
row.get('error')))
parentsByBlock.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
try:
if dataset in parentsByBlock and parentsByBlock[dataset] is None:
# then one of the block calls has failed, keep it failed!
continue
parentsByBlock.setdefault(dataset, {})
for item in rows:
parentsByBlock[dataset].setdefault(item['this_block_name'], set())
parentsByBlock[dataset][item['this_block_name']].add(item['parent_block_name'])
except Exception as exc:
print("Failure in findBlockParents for block %s. Error: %s" % (blockName, str(exc)))
parentsByBlock[dataset] = None
return parentsByBlock
def getRunsInBlock(blocks, dbsUrl):
"""
Provided a list of block names, find their run numbers
:param blocks: list of block names
:param dbsUrl: string with the DBS URL
:return: a dictionary of block names and a list of run numbers
"""
runsByBlock = {}
urls = ['%s/runs?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
logging.info("Executing %d requests against DBS 'runs' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
blockName = unquote(row['url'].rsplit('=')[-1])
if hasHTTPFailed(row):
msg = "Failure in getRunsInBlock for block %s. Error: %s %s" % (blockName,
row.get('code'),
row.get('error'))
raise RuntimeError(msg)
rows = json.loads(row['data'])
rows = aggRuns(rows) # adjust to DBS Go server output
runsByBlock[blockName] = rows[0]['run_num']
return runsByBlock
def getWorkflow(requestName, reqMgrUrl):
"Get list of workflow info from ReqMgr2 data-service for given request name"
headers = {'Accept': 'application/json'}
params = {}
url = '%s/data/request/%s' % (reqMgrUrl, requestName)
mgr = RequestHandler()
res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
data = json.loads(res)
return data.get('result', [])
def getDetoxQuota(url):
"Get list of workflow info from ReqMgr2 data-service for given request name"
headers = {}
params = {}
mgr = RequestHandler()
res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
res = res.split('\n')
return res
def eventsLumisInfo(inputs, dbsUrl, validFileOnly=0, sumOverLumi=0):
"Get information about events and lumis for given set of inputs: blocks or datasets"
what = 'dataset'
eventsLumis = {}
if not inputs:
return eventsLumis
if '#' in inputs[0]: # inputs are list of blocks
what = 'block_name'
urls = ['%s/filesummaries?validFileOnly=%s&sumOverLumi=%s&%s=%s'
% (dbsUrl, validFileOnly, sumOverLumi, what, quote(i)) for i in inputs]
data = multi_getdata(urls, ckey(), cert())
for row in data:
inputName = unquote(row['url'].split('=')[-1])
if hasHTTPFailed(row):
print("FAILURE: eventsLumisInfo for %s. Error: %s %s" % (inputName,
row.get('code'),
row.get('error')))
continue
rows = json.loads(row['data'])
for item in rows:
eventsLumis[inputName] = item
return eventsLumis
def getEventsLumis(dataset, dbsUrl, blocks=None, eventsLumis=None):
"Helper function to return number of events/lumis for given dataset or blocks"
nevts = nlumis = 0
if blocks:
missingBlocks = [b for b in blocks if b not in eventsLumis]
if missingBlocks:
eLumis = eventsLumisInfo(missingBlocks, dbsUrl)
eventsLumis.update(eLumis)
for block in blocks:
data = eventsLumis[block]
nevts += data['num_event']
nlumis += data['num_lumi']
return nevts, nlumis
if eventsLumis and dataset in eventsLumis:
data = eventsLumis[dataset]
return data['num_event'], data['num_lumi']
eLumis = eventsLumisInfo([dataset], dbsUrl)
data = eLumis.get(dataset, {'num_event': 0, 'num_lumi': 0})
return data['num_event'], data['num_lumi']
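# Illustrative usage sketch (not part of the production module): shows how the
# helper above can be used to count events and lumis for a dataset. The dataset
# name below is hypothetical and the DBS URL is only an example; a valid grid
# proxy and network access to DBS are required for the call to actually succeed.
def _exampleGetEventsLumis():
    dbsUrl = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader"
    dataset = "/HypotheticalPrimary/HypotheticalCampaign-v1/MINIAODSIM"
    nEvents, nLumis = getEventsLumis(dataset, dbsUrl)
    return nEvents, nLumis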
def getComputingTime(workflow, eventsLumis=None, unit='h', dbsUrl=None, logger=None):
"Return computing time per give workflow"
logger = getMSLogger(verbose=True, logger=logger)
cput = None
if 'InputDataset' in workflow:
dataset = workflow['InputDataset']
if 'BlockWhitelist' in workflow and workflow['BlockWhitelist']:
nevts, _ = getEventsLumis(dataset, dbsUrl, workflow['BlockWhitelist'], eventsLumis)
else:
nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
tpe = workflow['TimePerEvent']
cput = nevts * tpe
elif 'Chain' in workflow['RequestType']:
base = workflow['RequestType'].replace('Chain', '')
itask = 1
cput = 0
carryOn = {}
while True:
t = '%s%d' % (base, itask)
itask += 1
if t in workflow:
task = workflow[t]
if 'InputDataset' in task:
dataset = task['InputDataset']
if 'BlockWhitelist' in task and task['BlockWhitelist']:
nevts, _ = getEventsLumis(dataset, dbsUrl, task['BlockWhitelist'], eventsLumis)
else:
nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
elif 'Input%s' % base in task:
nevts = carryOn[task['Input%s' % base]]
elif 'RequestNumEvents' in task:
nevts = float(task['RequestNumEvents'])
else:
logger.debug("this is not supported, making it zero cput")
nevts = 0
tpe = task.get('TimePerEvent', 1)
carryOn[task['%sName' % base]] = nevts
if 'FilterEfficiency' in task:
carryOn[task['%sName' % base]] *= task['FilterEfficiency']
cput += tpe * nevts
else:
break
else:
nevts = float(workflow.get('RequestNumEvents', 0))
feff = float(workflow.get('FilterEfficiency', 1))
tpe = workflow.get('TimePerEvent', 1)
cput = nevts / feff * tpe
if cput is None:
return 0
if unit == 'm':
cput = cput / (60.)
if unit == 'h':
cput = cput / (60. * 60.)
if unit == 'd':
cput = cput / (60. * 60. * 24.)
return cput
def sigmoid(x):
"Sigmoid function"
return 1. / (1 + math.exp(-x))
def getNCopies(cpuHours, minN=2, maxN=3, weight=50000, constant=100000):
"Calculate number of copies for given workflow"
func = sigmoid(-constant / weight)
fact = (maxN - minN) / (1 - func)
base = (func * maxN - minN) / (func - 1)
return int(base + fact * sigmoid((cpuHours - constant) / weight))
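# Illustrative sketch (not part of the production module): demonstrates how the
# sigmoid-based formula above maps CPU hours onto a number of dataset copies,
# bounded by minN and maxN. The CPU-hour values below are arbitrary examples.
def _exampleNCopies():
    samples = (0, 50000, 100000, 200000, 500000)
    # returns a dict of {cpuHours: number of copies}, monotonically increasing
    return {cpuHours: getNCopies(cpuHours) for cpuHours in samples}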
def teraBytes(size):
"Return size in TB (Terabytes)"
return size / (1000 ** 4)
def gigaBytes(size):
"Return size in GB (Gigabytes), rounded to 2 digits"
return round(size / (1000 ** 3), 2)
def elapsedTime(time0, msg='Elapsed time', ndigits=1):
"Helper function to return elapsed time message"
msg = "%s: %s sec" % (msg, round(time.time() - time0, ndigits))
return msg
def getRequest(url, params):
"Helper function to GET data from given URL"
mgr = RequestHandler()
headers = {'Accept': 'application/json'}
verbose = 0
if 'verbose' in params:
verbose = params['verbose']
del params['verbose']
data = mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(), verbose=verbose)
return data
def postRequest(url, params):
"Helper function to POST request to given URL"
mgr = RequestHandler()
headers = {'Accept': 'application/json'}
verbose = 0
if 'verbose' in params:
verbose = params['verbose']
del params['verbose']
data = mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(),
verb='POST', verbose=verbose)
return data
def getIO(request, dbsUrl):
"Get input/output info about given request"
lhe = False
primary = set()
parent = set()
secondary = set()
if 'Chain' in request['RequestType']:
base = request['RequestType'].replace('Chain', '')
item = 1
while '%s%d' % (base, item) in request:
alhe, aprimary, aparent, asecondary = \
ioForTask(request['%s%d' % (base, item)], dbsUrl)
if alhe:
lhe = True
primary.update(aprimary)
parent.update(aparent)
secondary.update(asecondary)
item += 1
else:
lhe, primary, parent, secondary = ioForTask(request, dbsUrl)
return lhe, primary, parent, secondary
def ioForTask(request, dbsUrl):
"Return lfn, primary, parent and secondary datasets for given request"
lhe = False
primary = set()
parent = set()
secondary = set()
if 'InputDataset' in request:
datasets = request['InputDataset']
datasets = datasets if isinstance(datasets, list) else [datasets]
primary = set([r for r in datasets if r])
if primary and 'IncludeParent' in request and request['IncludeParent']:
parent = findParent(primary, dbsUrl)
if 'MCPileup' in request:
pileups = request['MCPileup']
pileups = pileups if isinstance(pileups, list) else [pileups]
secondary = set([r for r in pileups if r])
if 'LheInputFiles' in request and request['LheInputFiles'] in ['True', True]:
lhe = True
return lhe, primary, parent, secondary
def findParent(datasets, dbsUrl):
"""
Helper function to find the parent dataset.
It returns a dictionary key'ed by the child dataset
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
parentByDset = {}
if not datasets:
return parentByDset
urls = ['%s/datasetparents?dataset=%s' % (dbsUrl, d) for d in datasets]
logging.info("Executing %d requests against DBS 'datasetparents' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if hasHTTPFailed(row):
print("Failure in findParent for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
parentByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
try:
for item in rows:
parentByDset[item['this_dataset']] = item['parent_dataset']
except Exception as exc:
print("Failure in findParent for dataset %s. Error: %s" % (dataset, str(exc)))
parentByDset[dataset] = None
return parentByDset | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/MicroService/Tools/Common.py | 0.69181 | 0.217982 | Common.py | pypi |
from __future__ import print_function, division, absolute_import
from builtins import str
from future.utils import viewitems
from future import standard_library
standard_library.install_aliases()
import datetime
import json
import logging
import re
from urllib.parse import quote, unquote
from Utils.CertTools import cert, ckey
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
### Number of days that we wait for stuck rules to be sorted out.
### After that, the rule is no longer considered and a new rule is created
STUCK_LIMIT = 7 # 7 days
def parseNewLineJson(stream):
"""
Parse newline delimited json streaming data
"""
for line in stream.split("\n"):
if line:
yield json.loads(line)
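# Illustrative sketch (not part of the production module): parseNewLineJson is a
# generator, so it is typically consumed in a loop or wrapped in list(). The
# payload below is a hand-made two-record stream, not real Rucio output.
def _exampleParseNewLineJson():
    stream = '{"name": "block#1", "bytes": 10}\n{"name": "block#2", "bytes": 20}\n'
    return [record["name"] for record in parseNewLineJson(stream)]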
def stringDateToEpoch(strDate):
"""
Given a date/time in the format of:
'Thu, 29 Apr 2021 13:15:42 UTC'
it returns an integer with the equivalent EPOCH time
:param strDate: a string with the date and time
:return: the equivalent EPOCH time (integer)
"""
timestamp = datetime.datetime.strptime(strDate, "%a, %d %b %Y %H:%M:%S %Z")
return int(timestamp.strftime('%s'))
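# Illustrative sketch (not part of the production module): converts the
# timestamp format documented above. Note that strftime('%s') is applied to a
# naive datetime object, so the exact integer returned depends on the local
# timezone of the host running the code.
def _exampleStringDateToEpoch():
    return stringDateToEpoch("Thu, 29 Apr 2021 13:15:42 UTC")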
def getRucioToken(rucioAuthUrl, rucioAcct):
"""
Provided a Rucio account, fetch a token from the authentication server
:param rucioAuthUrl: url to the rucio authentication server
:param rucioAcct: rucio account to be used
:return: a tuple with the token string and its expiration time as an EPOCH integer
"""
params = {}
headers = {"X-Rucio-Account": rucioAcct}
url = '%s/auth/x509' % rucioAuthUrl
logging.info("Requesting a token to Rucio for account: %s, against url: %s", rucioAcct, rucioAuthUrl)
mgr = RequestHandler()
res = mgr.getheader(url, params=params, headers=headers, ckey=ckey(), cert=cert())
if res.getReason() == "OK":
userToken = res.getHeaderKey('X-Rucio-Auth-Token')
tokenExpiration = res.getHeaderKey('X-Rucio-Auth-Token-Expires')
logging.info("Retrieved Rucio token valid until: %s", tokenExpiration)
# convert the human readable expiration time to EPOCH time
tokenExpiration = stringDateToEpoch(tokenExpiration)
return userToken, tokenExpiration
raise RuntimeError("Failed to acquire a Rucio token. Error: {}".format(res.getReason()))
def renewRucioToken(rucioAuthUrl, userToken):
"""
Provided a user Rucio token, check its lifetime and extend it by another hour
:param rucioAuthUrl: url to the rucio authentication server
:param userToken: the user Rucio token to be renewed
:return: the new token lifetime as reported by the Rucio auth server
"""
params = {}
headers = {"X-Rucio-Auth-Token": userToken}
url = '%s/auth/validate' % rucioAuthUrl
logging.info("Renewing the Rucio token...")
mgr = RequestHandler()
res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
try:
newExpiration = eval(res)['lifetime']
except Exception as exc:
raise RuntimeError("Failed to renew Rucio token. Response: {} Error: {}".format(res, str(exc)))
return newExpiration
def getPileupContainerSizesRucio(containers, rucioUrl, rucioToken, scope="cms"):
"""
Given a list of containers, find their total size in Rucio
:param containers: list of container names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a flat dictionary of container and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
NOTE: Rucio version of getPileupDatasetSizes()
"""
sizeByDset = {}
if not containers:
return sizeByDset
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = ['{}/dids/{}/{}?dynamic=anything'.format(rucioUrl, scope, cont) for cont in containers]
logging.info("Executing %d requests against Rucio for the container size", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = row['url'].split('/dids/{}/'.format(scope))[1]
container = container.replace("?dynamic=anything", "")
if row['data'] is None:
msg = "Failure in getPileupContainerSizesRucio for container {}. Response: {}".format(container, row)
logging.error(msg)
sizeByDset.setdefault(container, None)
continue
response = json.loads(row['data'])
try:
sizeByDset.setdefault(container, response['bytes'])
except KeyError:
msg = "getPileupContainerSizesRucio function did not return a valid response for container: %s. Error: %s"
logging.error(msg, container, response)
sizeByDset.setdefault(container, None)
continue
return sizeByDset
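# Illustrative sketch (not part of the production module): typical call pattern
# for the token and container-size helpers above. The Rucio account and
# container name are hypothetical, the URLs are only examples, and valid grid
# credentials plus network access are required for this to actually work.
def _examplePileupContainerSizes():
    rucioAuthUrl = "https://cms-rucio-auth.cern.ch"
    rucioUrl = "http://cms-rucio.cern.ch"
    token, _expiration = getRucioToken(rucioAuthUrl, "wma_hypothetical_account")
    containers = ["/HypotheticalPU/HypotheticalCampaign-v1/PREMIX"]
    return getPileupContainerSizesRucio(containers, rucioUrl, token)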
def listReplicationRules(containers, rucioAccount, grouping,
rucioUrl, rucioToken, scope="cms"):
"""
List all the replication rules for the input filters provided.
It builds a dictionary of container name and the locations where
they have a rule locking data on, with some additional rule state
logic in the code.
:param containers: list of container names
:param rucioAccount: string with the rucio account
:param grouping: rule grouping string, only "A" or "D" are allowed
:param rucioUrl: string with the Rucio url
:param rucioToken: string with the Rucio token
:param scope: string with the data scope
:return: a flat dictionary key'ed by the container name, with a list of RSE
expressions that still need to be resolved
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
NOTE-2: Available rule states can be found at:
https://github.com/rucio/rucio/blob/16f39dffa1608caa0a1af8bbc0fcff2965dccc50/lib/rucio/db/sqla/constants.py#L180
"""
locationByContainer = {}
if not containers:
return locationByContainer
if grouping not in ["A", "D"]:
raise RuntimeError("Replication rule grouping value provided ({}) is not allowed!".format(grouping))
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = []
for cont in containers:
urls.append('{}/rules/?scope={}&account={}&grouping={}&name={}'.format(rucioUrl, scope, rucioAccount,
grouping, quote(cont, safe="")))
logging.info("Executing %d requests against Rucio to list replication rules", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = unquote(row['url'].split("name=")[1])
if "200 OK" not in row['headers']:
msg = "Failure in listReplicationRules for container {}. Response: {}".format(container, row)
logging.error(msg)
locationByContainer.setdefault(container, None)
continue
try:
locationByContainer.setdefault(container, [])
for item in parseNewLineJson(row['data']):
if item['state'] in ["U", "SUSPENDED", "R", "REPLICATING", "I", "INJECT"]:
msg = "Container %s has a rule ID %s in state %s. Will try to create a new rule."
logging.warning(msg, container, item['id'], item['state'])
continue
elif item['state'] in ["S", "STUCK"]:
if item['error'] == 'NO_SOURCES:NO_SOURCES':
msg = "Container {} has a STUCK rule with NO_SOURCES.".format(container)
msg += " Data could be lost forever... Rule info is: {}".format(item)
logging.warning(msg)
continue
# then calculate for how long it's been stuck
utcTimeNow = int(datetime.datetime.utcnow().strftime('%s'))
if item['stuck_at']:
stuckAt = stringDateToEpoch(item['stuck_at'])
else:
# consider it to be stuck since its creation
stuckAt = stringDateToEpoch(item['created_at'])
daysStuck = (utcTimeNow - stuckAt) // (24 * 60 * 60)
if daysStuck > STUCK_LIMIT:
msg = "Container {} has a STUCK rule for {} days (limit set to: {}).".format(container,
daysStuck,
STUCK_LIMIT)
msg += " Not going to use it! Rule info: {}".format(item)
logging.warning(msg)
continue
else:
msg = "Container {} has a STUCK rule for only {} days.".format(container, daysStuck)
msg += " Considering it for the pileup location"
logging.info(msg)
else:
logging.info("Container %s has rule ID %s in state %s, using it.",
container, item['id'], item['state'])
### NOTE: this is not an RSE name, but an RSE expression that still needs to be resolved
locationByContainer[container].append(item['rse_expression'])
except Exception as exc:
msg = "listReplicationRules function did not return a valid response for container: %s."
msg += "Server responded with: %s\nError: %s"
logging.exception(msg, container, str(exc), row['data'])
locationByContainer.setdefault(container, None)
continue
return locationByContainer
def getPileupSubscriptionsRucio(datasets, rucioUrl, rucioToken, scope="cms"):
"""
Provided a list of datasets, find the locations (RSEs) where every
block of the dataset has an available replica.
:param datasets: list of dataset names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary of datasets and the set of RSEs hosting all of their blocks.
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
# FIXME: we should definitely make a feature request to Rucio...
# so much, just to get the final RSEs for a container!!!
locationByDset = {}
if not datasets:
return locationByDset
headers = {"X-Rucio-Auth-Token": rucioToken}
# first, resolve the dataset into blocks
blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope)
urls = []
for _dset, blocks in viewitems(blocksByDset):
if blocks:
for block in blocks:
urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
# this is going to be bloody expensive in terms of HTTP requests
logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
block = row['url'].split("/{}/".format(scope))[1]
block = unquote(re.sub("/datasets$", "", block, 1))
container = block.split("#")[0]
locationByDset.setdefault(container, set())
if row['data'] is None:
msg = "Failure in getPileupSubscriptionsRucio container {} and block {}.".format(container, block)
msg += " Response: {}".format(row)
logging.error(msg)
locationByDset[container] = None
continue
if locationByDset[container] is None:
# then one of the block requests failed, skip the whole dataset
continue
thisBlockRSEs = set()
for item in parseNewLineJson(row['data']):
if item['state'] == "AVAILABLE":
thisBlockRSEs.add(item["rse"])
logging.info("Block: %s is available at: %s", block, thisBlockRSEs)
# now we have the final block location
if not locationByDset[container]:
# then this is the first block of this dataset
locationByDset[container] = thisBlockRSEs
else:
# otherwise, make an intersection of them
locationByDset[container] = locationByDset[container] & thisBlockRSEs
return locationByDset
def getBlocksAndSizeRucio(containers, rucioUrl, rucioToken, scope="cms"):
"""
Given a list of containers, find all their corresponding blocks and their sizes.
:param containers: list of container names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary in the form of:
{"dataset":
{"block":
{"blockSize": 111, "locations": ["x", "y"]}
}
}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
NOTE2: meant to return an output similar to Common.getBlockReplicasAndSize
"""
contBlockSize = {}
if not containers:
return contBlockSize
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = []
for cont in containers:
urls.append('{}/dids/{}/dids/search?type=dataset&long=True&name={}'.format(rucioUrl, scope, quote(cont + "#*")))
logging.info("Executing %d requests against Rucio DIDs search API for containers", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = row['url'].split("name=")[1]
container = unquote(container).replace("#*", "")
contBlockSize.setdefault(container, {})
if row['data'] in [None, ""]:
msg = "Failure in getBlocksAndSizeRucio function for container {}. Response: {}".format(container, row)
logging.error(msg)
contBlockSize[container] = None
continue
for item in parseNewLineJson(row['data']):
# NOTE: we do not care about primary block location in Rucio
contBlockSize[container][item['name']] = {"blockSize": item['bytes'], "locations": []}
return contBlockSize
### NOTE: likely not going to be used for a while
def getContainerBlocksRucio(containers, rucioUrl, rucioToken, scope="cms"):
"""
Provided a list of containers, find all their blocks.
:param containers: list of container names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary key'ed by the datasets with a list of blocks.
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
blocksByDset = {}
if not containers:
return blocksByDset
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = ['{}/dids/{}/{}/dids'.format(rucioUrl, scope, cont) for cont in containers]
logging.info("Executing %d requests against Rucio DIDs API for blocks in containers", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = row['url'].split("/{}/".format(scope))[1]
container = re.sub("/dids$", "", container, 1)
if not row['data']:
logging.warning("Dataset: %s has no blocks in Rucio", container)
blocksByDset.setdefault(container, [])
for item in parseNewLineJson(row['data']):
blocksByDset[container].append(item["name"])
return blocksByDset
### NOTE: likely not going to be used for a while
def getBlockReplicasAndSizeRucio(datasets, rucioUrl, rucioToken, scope="cms"):
"""
Given a list of datasets, find all their blocks with replicas
available.
:param datasets: list of dataset names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary in the form of:
{"dataset":
{"block":
{"blockSize": 111, "locations": ["x", "y"]}
}
}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
dsetBlockSize = {}
if not datasets:
return dsetBlockSize
headers = {"X-Rucio-Auth-Token": rucioToken}
# first, figure out their block names
blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope=scope)
urls = []
for _dset, blocks in viewitems(blocksByDset):
for block in blocks:
urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
# next, query the replicas API for the block location
# this is going to be bloody expensive in terms of HTTP requests
logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
block = row['url'].split("/{}/".format(scope))[1]
block = unquote(re.sub("/datasets$", "", block, 1))
container = block.split("#")[0]
dsetBlockSize.setdefault(container, dict())
if row['data'] is None:
msg = "Failure in getBlockReplicasAndSizeRucio for container {} and block {}.".format(container, block)
msg += " Response: {}".format(row)
logging.error(msg)
dsetBlockSize[container] = None
continue
if dsetBlockSize[container] is None:
# then one of the block requests failed, skip the whole dataset
continue
thisBlockRSEs = []
blockBytes = 0
for item in parseNewLineJson(row['data']):
blockBytes = item['bytes']
if item['state'] == "AVAILABLE":
thisBlockRSEs.append(item["rse"])
# now we have the final block location
if not blockBytes and not thisBlockRSEs:
logging.warning("Block: %s has no replicas and no size", block)
else:
dsetBlockSize[container][block] = {"locations": thisBlockRSEs, "blockSize": blockBytes}
return dsetBlockSize | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/MicroService/Tools/PycurlRucio.py | 0.661923 | 0.218305 | PycurlRucio.py | pypi |
from __future__ import print_function, division
# system modules
import os
import re
# WMCore modules
from WMCore.REST.Server import RESTFrontPage
class FrontPage(RESTFrontPage):
"""MicroService front page.
MicroService provides only one web page, the front page. The page just
loads the javascript user interface, complete with CSS and all JS
code embedded into it.
The JavaScript code performs all the app functionality via the REST
interface defined by the :class:`~.Data` class.
"""
def __init__(self, app, config, mount):
"""
:arg app: reference to the application object.
:arg config: reference to the configuration.
:arg str mount: URL mount point."""
mainroot = 'microservice' # entry point in access URL
wpath = os.getenv('MS_STATIC_ROOT', '')
print(wpath)
if not wpath:
content = os.path.abspath(__file__).rsplit('/', 5)[0]
xlib = (__file__.find("/xlib/") >= 0 and "x") or ""
wpath = "%s/%sdata/" % (content, xlib)
if not wpath.endswith('/'):
wpath += '/'
print(self.__class__.__name__, "static content: %s" % wpath)
mdict = {"root": wpath, "rx": re.compile(r"^[a-z]+/[-a-z0-9]+\.(?:html)$")}
tdict = {"root": wpath + "templates/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:html|tmpl)$")}
jdict = {"root": wpath + "js/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:js)$")}
cdict = {"root": wpath + "css/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\..*(?:css)$")}
idict = {"root": wpath + "images/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:png|gif|jpg)$")}
roots = {mainroot: mdict, "templates": tdict,
"js": jdict, "css": cdict, "images": idict}
# location of frontpage in the root, e.g. microservice
frontpage = "%s/templates/index.html" % mainroot
RESTFrontPage.__init__(self, app, config, mount, frontpage, roots) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/MicroService/WebGui/FrontPage.py | 0.585931 | 0.161816 | FrontPage.py | pypi |
from builtins import range
from WMCore.DataStructs.Run import Run
class Mask(dict):
"""
_Mask_
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
self.inclusive = True
self.setdefault("inclusivemask", True)
self.setdefault("FirstEvent", None)
self.setdefault("LastEvent", None)
self.setdefault("FirstLumi", None)
self.setdefault("LastLumi", None)
self.setdefault("FirstRun", None)
self.setdefault("LastRun", None)
self.setdefault("runAndLumis", {})
def setMaxAndSkipEvents(self, maxEvents, skipEvents):
"""
_setMaxAndSkipEvents_
Set FirstEvent & LastEvent fields as max & skip events
"""
self['FirstEvent'] = skipEvents
if maxEvents is not None:
self['LastEvent'] = skipEvents + maxEvents
return
def setMaxAndSkipLumis(self, maxLumis, skipLumi):
"""
_setMaxAndSkipLumis_
Set the maximum number of lumi sections and the starting point
"""
self['FirstLumi'] = skipLumi
self['LastLumi'] = skipLumi + maxLumis
return
def setMaxAndSkipRuns(self, maxRuns, skipRun):
"""
_setMaxAndSkipRuns_
Set the maximum number of runs and the starting point
"""
self['FirstRun'] = skipRun
self['LastRun'] = skipRun + maxRuns
return
def getMaxEvents(self):
"""
_getMaxEvents_
return maxevents setting
"""
if self['LastEvent'] is None or self['FirstEvent'] is None:
return None
return self['LastEvent'] - self['FirstEvent'] + 1
def getMax(self, keyType=None):
"""
_getMax_
returns the maximum number of runs/events/etc. for the type given by the keyType string
"""
if 'First%s' % (keyType) not in self:
return None
if self['First%s' % (keyType)] is None or self['Last%s' % (keyType)] is None:
return None
return self['Last%s' % (keyType)] - self['First%s' % (keyType)] + 1
def addRun(self, run):
"""
_addRun_
Add a run object
"""
run.lumis.sort()
firstLumi = run.lumis[0]
lastLumi = run.lumis[0]
for lumi in run.lumis:
if lumi <= lastLumi + 1:
lastLumi = lumi
else:
self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
firstLumi = lumi
lastLumi = lumi
self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
return
def addRunWithLumiRanges(self, run, lumiList):
"""
_addRunWithLumiRanges_
Add to runAndLumis with call signature
addRunWithLumiRanges(run=run, lumiList=[[start1, end1], [start2, end2], ...])
"""
self['runAndLumis'][run] = lumiList
return
def addRunAndLumis(self, run, lumis=None):
"""
_addRunAndLumis_
Add runs and lumis directly
TODO: The name of this function is a little misleading. If you pass a list of lumis
it ignores the content of the list and adds a range based on the max/min in
the list. Missing lumis in the list are ignored.
NOTE: If the new run/lumi range overlaps with the pre-existing lumi ranges in the
mask, no attempt is made to merge these together. This can result in a mask
with duplicate lumis.
"""
lumis = lumis or []
if not isinstance(lumis, list):
lumis = list(lumis)
if run not in self['runAndLumis']:
self['runAndLumis'][run] = []
self['runAndLumis'][run].append([min(lumis), max(lumis)])
return
def getRunAndLumis(self):
"""
_getRunAndLumis_
Return list of active runs and lumis
"""
return self['runAndLumis']
def runLumiInMask(self, run, lumi):
"""
_runLumiInMask_
See if a particular runLumi is in the mask
"""
if self['runAndLumis'] == {}:
# Empty dictionary
# ALWAYS TRUE
return True
if run not in self['runAndLumis']:
return False
for pair in self['runAndLumis'][run]:
# Go through each max and min pair
if pair[0] <= lumi and pair[1] >= lumi:
# Then the lumi is bracketed
return True
return False
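# Illustrative sketch (not part of the class above): builds a small mask with a
# lumi range in run 1 and explicit ranges in run 2, then checks a few run/lumi
# pairs against it. The run and lumi numbers are arbitrary examples.
def _exampleMaskUsage():
    mask = Mask()
    mask.addRunAndLumis(1, [11, 20])
    mask.addRunWithLumiRanges(2, [[1, 5], [8, 9]])
    checks = [mask.runLumiInMask(1, 15),  # True, 15 is inside [11, 20]
              mask.runLumiInMask(1, 25),  # False, outside the range
              mask.runLumiInMask(3, 1)]   # False, run 3 is not in the mask
    return checks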
def filterRunLumisByMask(self, runs):
"""
_filterRunLumisByMask_
Pass a Mask a list of run objects, get back a list of
run objects that correspond to the actual mask allowed values
"""
if self['runAndLumis'] == {}:
# Empty dictionary
# ALWAYS TRUE
return runs
runDict = {}
for r in runs:
if r.run in runDict:
runDict[r.run].extendLumis(r.lumis)
else:
runDict[r.run] = r
maskRuns = set(self["runAndLumis"].keys())
passedRuns = set([r.run for r in runs])
filteredRuns = maskRuns.intersection(passedRuns)
newRuns = set()
for runNumber in filteredRuns:
maskLumis = set()
for pair in self["runAndLumis"][runNumber]:
if pair[0] == pair[1]:
maskLumis.add(pair[0])
else:
maskLumis = maskLumis.union(list(range(pair[0], pair[1] + 1)))
filteredLumis = set(runDict[runNumber].lumis).intersection(maskLumis)
if len(filteredLumis) > 0:
filteredLumiEvents = [(lumi, runDict[runNumber].getEventsByLumi(lumi)) for lumi in filteredLumis]
newRuns.add(Run(runNumber, *filteredLumiEvents))
return newRuns | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/DataStructs/Mask.py | 0.704567 | 0.254295 | Mask.py | pypi |
from __future__ import absolute_import, division, print_function
from future.utils import listitems
import sys
import hashlib
import time
from functools import total_ordering
from Utils.Utilities import encodeUnicodeToBytes
from WMCore.DataStructs.WMObject import WMObject
@total_ordering
class WorkUnit(WMObject, dict):
"""
_WorkUnit_
Data object that contains details for a single work unit
corresponding to tables workunit and frl_workunit_assoc
"""
fieldsToCopy = ['taskid', 'retry_count', 'last_unit_count', 'last_submit_time', 'status', 'firstevent',
'lastevent', 'fileid']
fieldsForInfo = fieldsToCopy + ['run_lumi']
def __init__(self, taskID=None, retryCount=0, lastUnitCount=None, lastSubmitTime=int(time.time()),
status=0, firstEvent=1, lastEvent=sys.maxsize, fileid=None, runLumi=None):
super(WorkUnit, self).__init__(self)
self.setdefault('taskid', taskID)
self.setdefault('retry_count', retryCount)
self.setdefault('last_unit_count', lastUnitCount)
self.setdefault('last_submit_time', lastSubmitTime)
self.setdefault('status', status)
self.setdefault('firstevent', firstEvent)
self.setdefault('lastevent', lastEvent)
self.setdefault('fileid', fileid)
self.setdefault('run_lumi', runLumi)
def __lt__(self, rhs):
"""
Compare work units in task id, run, lumi, first event, last event
"""
if self['taskid'] != rhs['taskid']:
return self['taskid'] < rhs['taskid']
if self['run_lumi'].run != rhs['run_lumi'].run:
return self['run_lumi'].run < rhs['run_lumi'].run
if self['run_lumi'].lumis != rhs['run_lumi'].lumis:
return self['run_lumi'].lumis < rhs['run_lumi'].lumis
if self['firstevent'] != rhs['firstevent']:
return self['firstevent'] < rhs['firstevent']
return self['lastevent'] < rhs['lastevent']
def __eq__(self, rhs):
"""
Work units are equal if they have the same task, run, lumis and event range
"""
return (self['taskid'] == rhs['taskid'] and self['run_lumi'].run == rhs['run_lumi'].run and
self['run_lumi'].lumis == rhs['run_lumi'].lumis and self['firstevent'] == rhs['firstevent'] and
self['lastevent'] == rhs['lastevent'])
def __hash__(self):
"""
Hash function for this dict.
"""
# Generate an immutable sorted string representing this object
# NOTE: the run object needs to be hashed
immutableSelf = []
for keyName in sorted(self):
if keyName == "run_lumi":
immutableSelf.append((keyName, hash(self[keyName])))
else:
immutableSelf.append((keyName, self[keyName]))
hashValue = hashlib.sha1(encodeUnicodeToBytes(str(immutableSelf)))
return int(hashValue.hexdigest()[:15], 16)
def json(self, thunker=None):
"""
_json_
Serialize the object. Only copy select fields and construct one new field.
"""
jsonDict = {k: self[k] for k in WorkUnit.fieldsToCopy}
jsonDict["run_lumi"] = {"run_number": self['run_lumi'].run, "lumis": self['run_lumi'].lumis}
return jsonDict
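# Illustrative sketch (not part of the class above): creates a work unit for a
# hypothetical task/file and serializes it with the json() method defined above.
# The Run import, task id, file id and run/lumi numbers are illustrative only.
def _exampleWorkUnitJson():
    from WMCore.DataStructs.Run import Run
    wu = WorkUnit(taskID=1, lastUnitCount=10, fileid=42, runLumi=Run(184121, 1, 2, 3))
    return wu.json()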
def __to_json__(self, thunker=None):
"""
__to_json__
This is the standard way we jsonize other objects.
Included here so we have a uniform method.
"""
return self.json(thunker)
def getInfo(self):
"""
Returns: tuple of parameters for the work unit
"""
return tuple(self[x] for x in WorkUnit.fieldsForInfo) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/DataStructs/WorkUnit.py | 0.592195 | 0.15241 | WorkUnit.py | pypi |
from builtins import str, bytes
__all__ = []
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WMObject import WMObject
class File(WMObject, dict):
"""
_File_
Data object that contains details for a single file
TODO
- use the decorator `from functools import total_ordering` after
dropping support for python 2.6
- then, drop __ne__, __le__, __gt__, __ge__
"""
def __init__(self, lfn="", size=0, events=0, checksums=None,
parents=None, locations=None, merged=False):
dict.__init__(self)
checksums = checksums or {}
self.setdefault("lfn", lfn)
self.setdefault("size", size)
self.setdefault("events", events)
self.setdefault("checksums", checksums)
self.setdefault('runs', set())
self.setdefault('merged', merged)
self.setdefault('last_event', 0)
self.setdefault('first_event', 0)
if locations is None:
self.setdefault("locations", set())
else:
self.setdefault("locations", locations)
if parents is None:
self.setdefault("parents", set())
else:
self.setdefault("parents", parents)
def addRun(self, run):
"""
_addRun_
run should be an instance of WMCore.DataStructs.Run
Add a run container to this file, tweak the run and lumi
keys to be max run and max lumi for backwards compat.
"""
if not isinstance(run, Run):
msg = "addRun argument must be of type WMCore.DataStructs.Run"
raise RuntimeError(msg)
addFlag = False
for runMember in self['runs']:
if runMember.run == run.run:
# this relies on the Run object overriding __add__ to update itself in place
runMember + run
addFlag = True
if not addFlag:
self['runs'].add(run)
return
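# Illustrative sketch (not part of the class above): creates a file data object,
# attaches a run with a few lumis and a location, then serializes it. The LFN,
# run number and site name are hypothetical.
def _exampleFileUsage():
    fileObj = File(lfn="/store/hypothetical/file.root", size=1000, events=100)
    fileObj.addRun(Run(184121, 1, 2, 3))
    fileObj.setLocation("T2_XX_Hypothetical")
    return fileObj.json()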
def load(self):
"""
A DataStructs file has nothing to load from, other implementations will
over-ride this method.
"""
if self['id']:
self['lfn'] = '/store/testing/%s' % self['id']
def save(self):
"""
A DataStructs file has nothing to save to, other implementations will
over-ride this method.
"""
pass
def setLocation(self, pnn):
# Make sure we don't add None, [], "" as file location
if pnn:
self['locations'] = self['locations'] | set(self.makelist(pnn))
def __eq__(self, rhs):
"""
File is equal if it has the same name
"""
eq = False
if isinstance(rhs, type(self)):
eq = self['lfn'] == rhs['lfn']
elif isinstance(rhs, (str, bytes)):
eq = self['lfn'] == rhs
return eq
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
thisHash = self['lfn'].__hash__()
return thisHash
def __lt__(self, rhs):
"""
Sort files based on lexicographical ordering of the value connected
to the 'lfn' key
"""
eq = False
if isinstance(rhs, type(self)):
eq = self['lfn'] < rhs['lfn']
elif isinstance(rhs, (str, bytes)):
eq = self['lfn'] < rhs
return eq
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
def json(self, thunker=None):
"""
_json_
Serialize the file object. This will convert all Sets() to lists and
weed out the internal data structures that don't need to be shared.
"""
fileDict = {"last_event": self["last_event"],
"first_event": self["first_event"],
"lfn": self["lfn"],
"locations": list(self["locations"]),
"id": self.get("id", None),
"checksums": self["checksums"],
"events": self["events"],
"merged": self["merged"],
"size": self["size"],
"runs": [],
"parents": []}
for parent in self["parents"]:
if isinstance(parent, (str, bytes)):
# Then for some reason, we're passing strings
# Done specifically for ErrorHandler
fileDict['parents'].append(parent)
elif thunker is None:
continue
else:
fileDict["parents"].append(thunker._thunk(parent))
for run in self["runs"]:
runDict = {"run_number": run.run,
"lumis": run.lumis}
fileDict["runs"].append(runDict)
return fileDict
def __to_json__(self, thunker=None):
"""
__to_json__
This is the standard way we jsonize other objects.
Included here so we have a uniform method.
"""
return self.json(thunker) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/DataStructs/File.py | 0.533884 | 0.174868 | File.py | pypi |
from builtins import str
from WMCore.DataStructs.WMObject import WMObject
class SummaryHistogram(WMObject):
"""
_SummaryHistogram_
Histogram object, provides familiar CRUD methods
which take care of most of the statistical
calculations when adding points, this object
can also be converted into a dictionary
for JSON documents. It knows how to combine
with other histograms and create itself from
a dictionary provided it has matching structure.
This is an interface, the real work is done
by the ContinuousSummaryHistogram and
DiscreteSummaryHistogram objects
"""
def __init__(self, title = None, xLabel = None):
"""
__init__
Initialize the elements in the object.
"""
# Meta-information about the histogram, it can be changed at any point
self.title = title
self.xLabel = xLabel
# These shouldn't be touched from anything outside the SummaryHistogram object and children classes
self.continuous = None
self.jsonInternal = None
self.data = {}
self.average = None
self.stdDev = None
return
def setTitle(self, newTitle):
"""
_setTitle_
Set the title
"""
self.title = newTitle
return
def setHorizontalLabel(self, xLabel):
"""
_setHorizontalLabel_
Set the label on the x axis
"""
self.xLabel = xLabel
return
def addPoint(self, xValue, yLabel):
"""
_addPoint_
Add a point to the histogram data. A discrete histogram can have
many y labels for the same x value, while a continuous histogram
allows only a single yLabel.
The values should be on a similar scale for best results.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def toJSON(self):
"""
_toJSON_
Return a dictionary which is compatible
with a JSON object
"""
if self.continuous is None:
raise TypeError("toJSON can't be called on a bare SummaryHistogram object")
# Get what the children classes did
jsonDict = {}
jsonDict['internalData'] = self.jsonInternal or {}
# Add the common things
jsonDict['title'] = self.title
jsonDict['xLabel'] = self.xLabel
jsonDict['continuous'] = self.continuous
jsonDict['data'] = self.data
jsonDict['stdDev'] = self.stdDev
jsonDict['average'] = self.average
return jsonDict
def __add__(self, other):
"""
__add__
Add two histograms, combine statistics.
"""
raise NotImplementedError("SummaryHistogram objects can't be used, use either the continuous or discrete implementation")
def __str__(self):
"""
__str__
Return the str object of the JSON
"""
return str(self.toJSON()) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/DataStructs/MathStructs/SummaryHistogram.py | 0.844601 | 0.581749 | SummaryHistogram.py | pypi |
from __future__ import division
import math
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import validateNumericInput
from WMCore.Algorithms.MathAlgos import calculateRunningAverageAndQValue, calculateStdDevFromQ
class ContinuousSummaryHistogram(SummaryHistogram):
"""
_ContinuousSummaryHistogram_
A histogram where there are continuous points
with certain frequency, it follows
that there is only one value in Y and
that the average and standard deviation are
not calculated on the frequency values but the X values.
"""
def __init__(self, title, xLabel, yLabel = None,
roundingDecimals = 2, nBins = None,
dropOutliers = False, sigmaLimit = 5,
storeHistogram = True):
"""
__init__
Initialize a more complex histogram structure, containing different
data to calculate online average and standard deviations. This data is also
stored in the JSON to allow rebuilding and adding histograms.
All histograms are binned when requested, the resolution can be specified
through nBins, otherwise the value used is the one recommended in:
Wand, M.P. (1997), "Data-Based Choice of Histogram Bin Width," The American Statistician, 51, 59-64.
If specified, outliers farther than sigmaLimit standard deviations from the
mean will not be included in the binned histogram.
"""
# Initialize the parent object
SummaryHistogram.__init__(self, title, xLabel)
# Indicate this is a continuous histogram
self.continuous = True
# Add data only used in the continuous version
self.yLabel = yLabel
self.nPoints = 0
self.QValue = None
self.average = None
# Configuration parameters for the continuous histograms
self.roundingDecimals = roundingDecimals
self.fixedNBins = nBins
self.dropOutliers = dropOutliers
self.sigmaLimit = sigmaLimit
self.binned = False
self.storeHistogram = storeHistogram
# Override initialization of some attributes
self.average = 0.0
self.stdDev = 0.0
return
def addPoint(self, xValue, yLabel = None):
"""
_addPoint_
Add a point from a continuous set (only-numbers allowed currently) to the histogram data,
calculate the running average and standard deviation.
If no y-label had been specified before, one must be supplied
otherwise the given y-label must be either None or equal
to the stored value.
"""
if self.binned:
# Points can't be added to binned histograms!
raise Exception("Points can't be added to binned histograms")
if self.yLabel is None and yLabel is None:
raise Exception("Some y-label must be stored for the histogram")
elif self.yLabel is None:
self.yLabel = yLabel
elif yLabel is not None and self.yLabel != yLabel:
raise Exception("Only one y-label is allowed on continuous histograms")
if not validateNumericInput(xValue):
# Do nothing if it is not a number
return
xValue = float(xValue)
xValue = round(xValue, self.roundingDecimals)
if self.storeHistogram:
if xValue not in self.data:
self.data[xValue] = 0
self.data[xValue] += 1
self.nPoints += 1
(self.average, self.QValue) = calculateRunningAverageAndQValue(xValue, self.nPoints, self.average, self.QValue)
return
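# Illustrative sketch (not part of the class above): fills a small continuous
# histogram and serializes it, which also triggers the binning step. The title,
# label and values are arbitrary examples.
def _exampleContinuousHistogram():
    histo = ContinuousSummaryHistogram("Job runtime", "Seconds")
    for value in (10, 12, 12, 15, 40):
        histo.addPoint(value, "jobs")
    return histo.toJSON()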
def __add__(self, other):
#TODO: For HG1302, support multiple agents properly in the workload summary
raise NotImplementedError
def toJSON(self):
"""
_toJSON_
Bin the histogram if any, calculate the standard deviation. Store
the internal data needed for reconstruction of the histogram
from JSON and call superclass toJSON method.
"""
if self.nPoints:
self.stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
if not self.binned and self.storeHistogram:
self.binHistogram()
self.jsonInternal = {}
self.jsonInternal['yLabel'] = self.yLabel
self.jsonInternal['QValue'] = self.QValue
self.jsonInternal['nPoints'] = self.nPoints
return SummaryHistogram.toJSON(self)
def binHistogram(self):
"""
_binHistogram_
Histograms of continuous data must be binned,
this takes care of that using given or optimal parameters.
Note that this modifies the data object,
and points can't be added to the histogram after this.
"""
if not self.nPoints:
return
self.binned = True
# Number of bins can be specified or calculated based on number of points
nBins = self.fixedNBins
if nBins is None:
nBins = int(math.floor((5.0 / 3.0) * math.pow(self.nPoints, 1.0 / 3.0)))
# Define min and max
if not self.dropOutliers:
upperLimit = max(self.data.keys())
lowerLimit = min(self.data.keys())
else:
stdDev = calculateStdDevFromQ(self.QValue, self.nPoints)
upperLimit = self.average + (stdDev * self.sigmaLimit)
lowerLimit = self.average - (stdDev * self.sigmaLimit)
# Incremental delta
delta = abs(float(upperLimit - lowerLimit)) / nBins
# Build the bins, it's a list of tuples for now
bins = []
a = lowerLimit
b = lowerLimit + delta
while len(bins) < nBins:
bins.append((a, b))
a += delta
b += delta
# Go through data and populate the binned histogram
binnedHisto = {}
currentBin = 0
currentPoint = 0
sortedData = sorted(self.data.keys())
while currentPoint < len(sortedData):
point = sortedData[currentPoint]
encodedTuple = "%s,%s" % (bins[currentBin][0], bins[currentBin][1])
if encodedTuple not in binnedHisto:
binnedHisto[encodedTuple] = 0
if point > upperLimit or point < lowerLimit:
currentPoint += 1
elif currentBin == len(bins) - 1:
binnedHisto[encodedTuple] += self.data[point]
currentPoint += 1
elif point >= bins[currentBin][0] and point < bins[currentBin][1]:
binnedHisto[encodedTuple] += self.data[point]
currentPoint += 1
else:
currentBin += 1
self.data = binnedHisto
return | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/DataStructs/MathStructs/ContinuousSummaryHistogram.py | 0.753058 | 0.633354 | ContinuousSummaryHistogram.py | pypi |
from WMCore.DataStructs.MathStructs.SummaryHistogram import SummaryHistogram
from WMCore.Algorithms.MathAlgos import getAverageStdDev
class DiscreteSummaryHistogram(SummaryHistogram):
"""
_DiscreteSummaryHistogram_
A histogram where the data is organized by
a finite number of categories, it can have
many values for each category.
"""
def __init__(self, title, xLabel):
"""
__init__
Initialize a simpler histogram that only stores
the histogram. Everything else is calculated when the JSON is requested.
"""
# Initialize the parent object
SummaryHistogram.__init__(self, title, xLabel)
# Indicate this is a discrete histogram
self.continuous = False
# Add data only used in the discrete version
self.yLabels = set()
# Override initialization of some attributes
self.average = {}
self.stdDev = {}
return
def addPoint(self, xValue, yLabel):
"""
_addPoint_
Add point to discrete histogram,
x value is a category and therefore not rounded.
There can be many yLabels, and standard deviations are
not calculated online. Histograms are always stored.
"""
if xValue not in self.data:
# Record the category
self.data[xValue] = {}
for label in self.yLabels:
self.data[xValue][label] = 0
if yLabel not in self.yLabels:
# Record the label
self.yLabels.add(yLabel)
self.average[yLabel] = 0.0
self.stdDev[yLabel] = 0.0
for category in self.data:
self.data[category][yLabel] = 0
self.data[xValue][yLabel] += 1
return
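# Illustrative sketch (not part of the class above): fills a discrete histogram
# with two categories and two y-labels, then serializes it. The category and
# label names are arbitrary examples.
def _exampleDiscreteHistogram():
    histo = DiscreteSummaryHistogram("Jobs per site", "Site")
    histo.addPoint("T1_XX_SiteA", "success")
    histo.addPoint("T1_XX_SiteA", "failure")
    histo.addPoint("T2_YY_SiteB", "success")
    return histo.toJSON()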
def __add__(self, other):
#TODO: For HG1302, support multiple agents properly in the workload summary
raise NotImplementedError
def toJSON(self):
"""
_toJSON_
Calculate average and standard deviation, store it
and call the parent class toJSON method
"""
for yLabel in self.yLabels:
numList = []
for xValue in self.data:
numList.append(self.data[xValue][yLabel])
(self.average[yLabel], self.stdDev[yLabel]) = getAverageStdDev(numList)
return SummaryHistogram.toJSON(self) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/src/python/WMCore/DataStructs/MathStructs/DiscreteSummaryHistogram.py | 0.702122 | 0.544801 | DiscreteSummaryHistogram.py | pypi |
import logging
import sys
from collections import Counter
from WMCore.Services.DBS.DBS3Reader import DBS3Reader
from WMCore.Services.Rucio.Rucio import Rucio
RUCIO_ACCT = "wma_prod"
RUCIO_HOST = "http://cms-rucio.cern.ch"
RUCIO_AUTH = "https://cms-rucio-auth.cern.ch"
DBS_URL = "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
def loggerSetup(logLevel=logging.INFO):
"""
Return a logger which writes everything to stdout.
"""
logger = logging.getLogger(__name__)
outHandler = logging.StreamHandler(sys.stdout)
outHandler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(module)s: %(message)s"))
outHandler.setLevel(logLevel)
logger.addHandler(outHandler)
logger.setLevel(logLevel)
return logger
def getFromRucio(dataset, logger):
"""
Use the WMCore Rucio object to fetch all the blocks and files
for a given container.
Returns a dictionary key'ed by the block name, with the number of files as value.
"""
rucio = Rucio(acct=RUCIO_ACCT,
hostUrl=RUCIO_HOST,
authUrl=RUCIO_AUTH,
configDict={'logger': logger})
result = dict()
for block in rucio.getBlocksInContainer(dataset):
data = rucio.getDID(block)
result.setdefault(block, data['length'])
return result
def getFromDBS(dataset, logger):
"""
Uses the WMCore DBS3Reader object to fetch all the blocks and files
for a given container.
Returns a dictionary key'ed by the block name, and an inner dictionary
with the number of valid and invalid files. It also returns a total counter
for the number of valid and invalid files in the dataset.
"""
dbsReader = DBS3Reader(DBS_URL, logger)
result = dict()
dbsFilesCounter = Counter({'valid': 0, 'invalid': 0})
blocks = dbsReader.listFileBlocks(dataset)
for block in blocks:
data = dbsReader.dbs.listFileArray(block_name=block, validFileOnly=0, detail=True)
result.setdefault(block, Counter({'valid': 0, 'invalid': 0}))
for fileInfo in data:
if fileInfo['is_file_valid'] == 1:
result[block]['valid'] += 1
dbsFilesCounter['valid'] += 1
else:
result[block]['invalid'] += 1
dbsFilesCounter['invalid'] += 1
return result, dbsFilesCounter
def main():
"""
Expects a dataset name as input argument.
It then queries Rucio and DBS and compare their blocks and
number of files.
"""
if len(sys.argv) != 2:
print("A dataset name must be provided in the command line")
sys.exit(1)
datasetName = sys.argv[1]
logger = loggerSetup(logging.INFO)
rucioOutput = getFromRucio(datasetName, logger)
dbsOutput, dbsFilesCounter = getFromDBS(datasetName, logger)
logger.info("*** Dataset: %s", datasetName)
logger.info("Rucio file count : %s", sum(rucioOutput.values()))
logger.info("DBS file count : %s", dbsFilesCounter['valid'] + dbsFilesCounter['invalid'])
logger.info(" - valid files : %s", dbsFilesCounter['valid'])
logger.info(" - invalid files : %s", dbsFilesCounter['invalid'])
logger.info("Blocks in Rucio but not in DBS: %s", set(rucioOutput.keys()) - set(dbsOutput.keys()))
logger.info("Blocks in DBS but not in Rucio: %s", set(dbsOutput.keys()) - set(rucioOutput.keys()))
for blockname in rucioOutput:
if blockname not in dbsOutput:
logger.error("This block does not exist in DBS: %s", blockname)
continue
if rucioOutput[blockname] != sum(dbsOutput[blockname].values()):
logger.warning("Block with file mismatch: %s", blockname)
logger.warning("\tRucio: %s\t\tDBS: %s", rucioOutput[blockname], sum(dbsOutput[blockname].values()))
if __name__ == "__main__":
sys.exit(main()) | /reqmgr2ms-unmerged-2.2.4rc2.tar.gz/reqmgr2ms-unmerged-2.2.4rc2/bin/adhoc-scripts/checkDsetFileCount.py | 0.413477 | 0.306037 | checkDsetFileCount.py | pypi |
from textwrap import TextWrapper
from collections import OrderedDict
def twClosure(replace_whitespace=False,
break_long_words=False,
maxWidth=120,
maxLength=-1,
maxDepth=-1,
initial_indent=''):
"""
Deals with indentation of dictionaries with very long key, value pairs.
replace_whitespace: Replace each whitespace character with a single space.
break_long_words: If True, words longer than maxWidth will be broken.
maxWidth: The maximum length of wrapped lines.
initial_indent: String that will be prepended to the first line of the output
Wraps all strings for both keys and values to 120 chars.
Uses 4 spaces indentation for both keys and values.
Nested dictionaries and lists go to next line.
"""
twr = TextWrapper(replace_whitespace=replace_whitespace,
break_long_words=break_long_words,
width=maxWidth,
initial_indent=initial_indent)
def twEnclosed(obj, ind='', depthReached=0, reCall=False):
"""
The inner function of the closure
ind: Initial indentation for the single output string
reCall: Flag to indicate a recursive call (should not be used outside)
"""
output = ''
if isinstance(obj, dict):
obj = OrderedDict(sorted(obj.items(),
key=lambda t: t[0],
reverse=False))
if reCall:
output += '\n'
ind += ' '
depthReached += 1
lengthReached = 0
for key, value in obj.items():
lengthReached += 1
if lengthReached > maxLength and maxLength >= 0:
output += "%s...\n" % ind
break
if depthReached <= maxDepth or maxDepth < 0:
output += "%s%s: %s" % (ind,
''.join(twr.wrap(key)),
twEnclosed(value, ind, depthReached=depthReached, reCall=True))
elif isinstance(obj, (list, set)):
if reCall:
output += '\n'
ind += ' '
lengthReached = 0
for value in obj:
lengthReached += 1
if lengthReached > maxLength and maxLength >= 0:
output += "%s...\n" % ind
break
if depthReached <= maxDepth or maxDepth < 0:
output += "%s%s" % (ind, twEnclosed(value, ind, depthReached=depthReached, reCall=True))
else:
output += "%s\n" % str(obj) # join(twr.wrap(str(obj)))
return output
return twEnclosed
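# Illustrative sketch (not part of the production module): builds a formatter
# that truncates output to at most two entries per level and two nesting levels.
# The payload below is a hand-made dictionary, not real workflow data.
def _exampleTwClosure():
    formatter = twClosure(maxWidth=60, maxLength=2, maxDepth=2)
    payload = {"Campaign": "HypotheticalCampaign",
               "SiteWhitelist": ["T1_XX_SiteA", "T2_YY_SiteB", "T2_ZZ_SiteC"]}
    return formatter(payload)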
def twPrint(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
"""
A simple caller of twClosure (see docstring for twClosure)
"""
twPrinter = twClosure(maxWidth=maxWidth,
maxLength=maxLength,
maxDepth=maxDepth)
print(twPrinter(obj))
def twFormat(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
"""
A simple caller of twClosure (see docstring for twClosure)
"""
twFormatter = twClosure(maxWidth=maxWidth,
maxLength=maxLength,
maxDepth=maxDepth)
return twFormatter(obj) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/TwPrint.py | 0.75985 | 0.299067 | TwPrint.py | pypi |
from __future__ import print_function, division
import io
import os
import stat
import subprocess
import time
import zlib
from Utils.Utilities import decodeBytesToUnicode
from Utils.PythonVersion import PY3
def calculateChecksums(filename):
"""
_calculateChecksums_
Get the adler32 and crc32 checksums of a file. Return None on error
Process line by line and adjust for known signed vs. unsigned issues
http://docs.python.org/library/zlib.html
The cksum UNIX command line tool implements a CRC32 checksum that is
different than any of the python algorithms, therefore open cksum
in a subprocess and feed it the same chunks of data that are used
to calculate the adler32 checksum.
"""
adler32Checksum = 1 # adler32 of an empty string
cksumProcess = subprocess.Popen("cksum", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# the lambda basically creates an iterator function with zero
# arguments that steps through the file in 4096 byte chunks
with open(filename, 'rb') as f:
for chunk in iter((lambda: f.read(4096)), b''):
adler32Checksum = zlib.adler32(chunk, adler32Checksum)
cksumProcess.stdin.write(chunk)
cksumProcess.stdin.close()
cksumProcess.wait()
cksumStdout = cksumProcess.stdout.read().split()
cksumProcess.stdout.close()
# consistency check on the cksum output
filesize = os.stat(filename)[stat.ST_SIZE]
if len(cksumStdout) != 2 or int(cksumStdout[1]) != filesize:
raise RuntimeError("Something went wrong with the cksum calculation !")
if PY3:
# using native-string approach. convert from bytes to unicode in
# python 3 only.
cksumStdout[0] = decodeBytesToUnicode(cksumStdout[0])
return (format(adler32Checksum & 0xffffffff, '08x'), cksumStdout[0])
def tail(filename, nLines=20):
"""
_tail_
A version of tail
Adapted from code on http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
"""
assert nLines >= 0
pos, lines = nLines + 1, []
# make sure only valid utf8 encoded chars will be passed along
with io.open(filename, 'r', encoding='utf8', errors='ignore') as f:
while len(lines) <= nLines:
try:
f.seek(-pos, 2)
except IOError:
f.seek(0)
break
finally:
lines = list(f)
pos *= 2
text = "".join(lines[-nLines:])
return text
def getFileInfo(filename):
"""
_getFileInfo_
Return file info in a friendly format
"""
filestats = os.stat(filename)
fileInfo = {'Name': filename,
'Size': filestats[stat.ST_SIZE],
'LastModification': time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(filestats[stat.ST_MTIME])),
'LastAccess': time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(filestats[stat.ST_ATIME]))}
return fileInfo
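# Illustrative sketch (not part of the production module): writes a small
# temporary file and inspects it with the helpers above. The file name and
# content are generated on the fly, so no assumptions are made about existing
# paths on the host.
def _exampleFileInspection():
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w', suffix='.log', delete=False) as fObj:
        fObj.write("\n".join("line %d" % i for i in range(50)))
        tmpName = fObj.name
    lastLines = tail(tmpName, nLines=3)
    info = getFileInfo(tmpName)
    os.remove(tmpName)
    return lastLines, info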
def findMagicStr(filename, matchString):
"""
_findMagicStr_
Parse a log file looking for a pattern string
"""
with io.open(filename, 'r', encoding='utf8', errors='ignore') as logfile:
# TODO: can we avoid reading the whole file
for line in logfile:
if matchString in line:
yield line
def getFullPath(name, envPath="PATH"):
"""
:param name: file name
:param envPath: any environment variable specified for path (PATH, PYTHONPATH, etc)
:return: full path if it is under PATH env
"""
for path in os.getenv(envPath).split(os.path.pathsep):
fullPath = os.path.join(path, name)
if os.path.exists(fullPath):
return fullPath
return None | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/FileTools.py | 0.639511 | 0.34726 | FileTools.py | pypi |
from __future__ import print_function, division
from builtins import str, bytes
def portForward(port):
"""
Decorator wrapper function for port forwarding of the REST calls of any
function to a given port.
Currently there are three constraints for applying this decorator.
1. The function to be decorated must be defined within a class and not being a static method.
The reason for that is because we need to be sure the function's signature will
always include the class instance as its first argument.
2. The url argument must be present as the second one in the positional argument list
of the decorated function (right after the class instance argument).
3. The url must follow the syntax specifications in RFC 1808:
https://tools.ietf.org/html/rfc1808.html
If all of the above constraints are fulfilled and the url is part of the
urlMangleList, then the url is parsed and the port is substituted with the
one provided as an argument to the decorator's wrapper function.
    :param port: The port to which the REST call should be forwarded.
"""
def portForwardDecorator(callFunc):
"""
The actual decorator
"""
def portMangle(callObj, url, *args, **kwargs):
"""
Function used to check if the url coming with the current argument list
is to be forwarded and if so change the port to the one provided as an
argument to the decorator wrapper.
            :param callObj: The class instance (self from within the class),
                            which is always present in the signature of a
                            public method. We never use this argument, but
                            it has to stay in place so that the positional
                            argument order is not broken
:param url: This is the actual url to be (eventually) forwarded
:param *args: The positional argument list coming from the original function
            :param *kwargs: The keyword arguments coming from the original function
"""
forwarded = False
try:
if isinstance(url, str):
urlToMangle = u'https://cmsweb'
if url.startswith(urlToMangle):
newUrl = url.replace(u'.cern.ch/', u'.cern.ch:%d/' % port, 1)
forwarded = True
elif isinstance(url, bytes):
urlToMangle = b'https://cmsweb'
if url.startswith(urlToMangle):
newUrl = url.replace(b'.cern.ch/', b'.cern.ch:%d/' % port, 1)
forwarded = True
except Exception:
pass
if forwarded:
return callFunc(callObj, newUrl, *args, **kwargs)
else:
return callFunc(callObj, url, *args, **kwargs)
return portMangle
return portForwardDecorator
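# Editorial usage sketch (not part of the original module): shows how the
# portForward decorator could be applied to a method whose second positional
# argument is a url. The client class, method and url below are hypothetical
# and only illustrate the calling convention described in the docstring above.
class _ExampleClient(object):
    @portForward(8443)
    def getData(self, url):
        # a real client would issue an HTTP request here; we simply return
        # the (possibly port-forwarded) url to show the substitution
        return url
def _examplePortForwardUsage():
    client = _ExampleClient()
    # cmsweb urls get the port injected, e.g. https://cmsweb.cern.ch:8443/couchdb
    return client.getData('https://cmsweb.cern.ch/couchdb')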
class PortForward():
"""
A class with a call method implementing a simple way to use the functionality
    provided by the portForward decorator as a pure functional call:
EXAMPLE:
from Utils.PortForward import PortForward
portForwarder = PortForward(8443)
url = 'https://cmsweb-testbed.cern.ch/couchdb'
url = portForwarder(url)
"""
def __init__(self, port):
"""
        The init method for the PortForward call class. It simply stores
        the port to which matching URLs will be forwarded.
"""
self.port = port
def __call__(self, url):
"""
The call method for the PortForward class
"""
def dummyCall(self, url):
return url
return portForward(self.port)(dummyCall)(self, url) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/PortForward.py | 0.831177 | 0.442275 | PortForward.py | pypi |
from __future__ import division, print_function
from builtins import object
from functools import reduce
class Functor(object):
"""
A simple functor class used to construct a function call which later to be
applied on an (any type) object.
NOTE:
It expects a function in the constructor and an (any type) object
passed to the run or __call__ methods, which methods once called they
construct and return the following function:
func(obj, *args, **kwargs)
NOTE:
All the additional arguments which the function may take must be set in
the __init__ method. If any of them are passed during run time an error
will be raised.
:func:
The function to which the rest of the constructor arguments are about
to be attached and then the newly created function will be returned.
- The function needs to take at least one parameter since the object
passed to the run/__call__ methods will always be put as a first
argument to the function.
:Example:
def adder(a, b, *args, **kwargs):
if args:
print("adder args: %s" % args)
if kwargs:
print("adder kwargs: %s" % kwargs)
            res = a + b
            print("adder res: %s" % res)
            return res
>>> x=Functor(adder, 8, 'foo', bar=True)
>>> x(2)
adder args: foo
adder kwargs: {'bar': True}
adder res: 10
10
>>> x
<Pipeline.Functor instance at 0x7f319bbaeea8>
"""
def __init__(self, func, *args, **kwargs):
"""
The init method for class Functor
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, obj):
"""
The call method for class Functor
"""
return self.run(obj)
def run(self, obj):
return self.func(obj, *self.args, **self.kwargs)
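# Editorial usage sketch (not part of the original module): a small,
# self-contained example of a Functor wrapping a plain function. The increment
# function and values are made up and only show how constructor arguments are
# forwarded to the wrapped function at call time.
def _exampleFunctorUsage():
    def increment(value, step=1):
        return value + step
    addFive = Functor(increment, step=5)
    return addFive(10)  # evaluates increment(10, step=5) -> 15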
class Pipeline(object):
"""
A simple Functional Pipeline Class: applies a set of functions to an object,
where the output of every previous function is an input to the next one.
"""
# NOTE:
# Similar and inspiring approaches but yet some different implementations
# are discussed in the following two links [1] & [2]. With a quite good
# explanation in [1], which helped a lot. All in all at the bottom always
# sits the reduce function.
# [1]
# https://softwarejourneyman.com/python-function-pipelines.html
# [2]
# https://gitlab.com/mc706/functional-pipeline
def __init__(self, funcLine=None, name=None):
"""
:funcLine: A list of functions or Functors of function + arguments (see
the Class definition above) that are to be applied sequentially
to the object.
- If any of the elements of 'funcLine' is a function, a direct
function call with the object as an argument is performed.
- If any of the elements of 'funcLine' is a Functor, then the
first argument of the Functor constructor is the function to
be evaluated and the object is passed as a first argument to
the function with all the rest of the arguments passed right
after it eg. the following Functor in the funcLine:
Functor(func, 'foo', bar=True)
will result in the following function call later when the
pipeline is executed:
func(obj, 'foo', bar=True)
:Example:
(using the adder function from above and an object of type int)
>>> pipe = Pipeline([Functor(adder, 5),
Functor(adder, 6),
Functor(adder, 7, "extraArg"),
Functor(adder, 8, update=True)])
>>> pipe.run(1)
adder res: 6
adder res: 12
adder args: extraArg
adder res: 19
adder kwargs: {'update': True}
adder res: 27
"""
self.funcLine = funcLine or []
self.name = name
def getPipelineName(self):
"""
__getPipelineName__
"""
name = self.name or "Unnamed Pipeline"
return name
def run(self, obj):
return reduce(lambda obj, functor: functor(obj), self.funcLine, obj) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/Pipeline.py | 0.763836 | 0.441252 | Pipeline.py | pypi |
# system modules
import os
import ssl
import time
import logging
import traceback
# third part library
try:
import jwt
except ImportError:
traceback.print_exc()
jwt = None
from Utils.Utilities import encodeUnicodeToBytes
# prevent "SSL: CERTIFICATE_VERIFY_FAILED" error
# this will cause pylint warning W0212, therefore we ignore it above
ssl._create_default_https_context = ssl._create_unverified_context
def readToken(name=None):
"""
Read IAM token either from environment or file name
    :param name: either a file name containing the token or an environment variable name which holds the token value.
                 If not provided, the token is read from the IAM_TOKEN environment variable.
:return: token or None
"""
if name and os.path.exists(name):
token = None
with open(name, 'r', encoding='utf-8') as istream:
token = istream.read()
return token
if name:
return os.environ.get(name)
return os.environ.get("IAM_TOKEN")
def tokenData(token, url="https://cms-auth.web.cern.ch/jwk", audUrl="https://wlcg.cern.ch/jwt/v1/any"):
"""
inspect and extract token data
:param token: token string
:param url: IAM provider URL
:param audUrl: audience string
"""
if not token or not jwt:
return {}
if isinstance(token, str):
token = encodeUnicodeToBytes(token)
jwksClient = jwt.PyJWKClient(url)
signingKey = jwksClient.get_signing_key_from_jwt(token)
key = signingKey.key
headers = jwt.get_unverified_header(token)
alg = headers.get('alg', 'RS256')
data = jwt.decode(
token,
key,
algorithms=[alg],
audience=audUrl,
options={"verify_exp": True},
)
return data
def isValidToken(token):
"""
check if given token is valid or not
:param token: token string
:return: true or false
"""
tokenDict = {}
tokenDict = tokenData(token)
exp = tokenDict.get('exp', 0) # expire, seconds since epoch
if not exp or exp < time.time():
return False
return True
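# Editorial usage sketch (not part of the original module): a hypothetical use
# of the module-level helpers above. It assumes a token is available via the
# IAM_TOKEN environment variable (or the given file name) and that the `jwt`
# package plus network access to the IAM provider are available.
def _exampleTokenCheck(name=None):
    """
    Read a token via readToken and report whether it is still valid.
    """
    token = readToken(name)
    if not token:
        return False
    return isValidToken(token)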
class TokenManager():
"""
TokenManager class handles IAM tokens
"""
def __init__(self,
name=None,
url="https://cms-auth.web.cern.ch/jwk",
audUrl="https://wlcg.cern.ch/jwt/v1/any",
logger=None):
"""
Token manager reads IAM tokens either from file or env.
It caches token along with expiration timestamp.
By default the env variable to use is IAM_TOKEN.
:param name: string representing either file or env where we should read token from
:param url: IAM provider URL
:param audUrl: audience string
:param logger: logger object or none to use default one
"""
self.name = name
self.url = url
self.audUrl = audUrl
self.expire = 0
self.token = None
self.logger = logger if logger else logging.getLogger()
try:
self.token = self.getToken()
except Exception as exc:
self.logger.exception("Failed to get token. Details: %s", str(exc))
def getToken(self):
"""
Return valid token and sets its expire timestamp
"""
if not self.token or not isValidToken(self.token):
self.token = readToken(self.name)
tokenDict = {}
try:
tokenDict = tokenData(self.token, url=self.url, audUrl=self.audUrl)
self.logger.debug(tokenDict)
except Exception as exc:
self.logger.exception(str(exc))
raise
self.expire = tokenDict.get('exp', 0)
return self.token
def getLifetime(self):
"""
        Return remaining lifetime of existing token
"""
return self.expire - int(time.time()) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/TokenManager.py | 0.66061 | 0.165863 | TokenManager.py | pypi |
from __future__ import (print_function, division)
from copy import copy
from builtins import object
from time import time
class MemoryCacheException(Exception):
def __init__(self, message):
super(MemoryCacheException, self).__init__(message)
class MemoryCache(object):
__slots__ = ["lastUpdate", "expiration", "_cache"]
def __init__(self, expiration, initialData=None):
"""
Initializes cache object
:param expiration: expiration time in seconds
:param initialData: initial value for the cache
"""
self.lastUpdate = int(time())
self.expiration = expiration
self._cache = initialData
def __contains__(self, item):
"""
Check whether item is in the current cache
:param item: a simple object (string, integer, etc)
:return: True if the object can be found in the cache, False otherwise
"""
return item in self._cache
def __getitem__(self, keyName):
"""
If the cache is a dictionary, return that item from the cache. Else, raise an exception.
:param keyName: the key name from the dictionary
"""
if isinstance(self._cache, dict):
return copy(self._cache.get(keyName))
else:
raise MemoryCacheException("Cannot retrieve an item from a non-dict MemoryCache object: {}".format(self._cache))
def reset(self):
"""
Resets the cache to its current data type
"""
if isinstance(self._cache, (dict, set)):
self._cache.clear()
elif isinstance(self._cache, list):
del self._cache[:]
else:
raise MemoryCacheException("The cache needs to be reset manually, data type unknown")
def isCacheExpired(self):
"""
Evaluate whether the cache has already expired, returning
True if it did, otherwise it returns False
"""
return self.lastUpdate + self.expiration < int(time())
def getCache(self):
"""
Raises an exception if the cache has expired, otherwise returns
its data
"""
if self.isCacheExpired():
expiredSince = int(time()) - (self.lastUpdate + self.expiration)
raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
return self._cache
def setCache(self, inputData):
"""
Refresh the cache with the content provided (refresh its expiration as well)
This method enforces the user to not change the cache data type
:param inputData: data to store in the cache
"""
if not isinstance(self._cache, type(inputData)):
raise TypeError("Current cache data type: %s, while new value is: %s" %
(type(self._cache), type(inputData)))
self.reset()
self.lastUpdate = int(time())
self._cache = inputData
def addItemToCache(self, inputItem):
"""
Adds new item(s) to the cache, without resetting its expiration.
It, of course, only works for data caches of type: list, set or dict.
:param inputItem: additional item to be added to the current cached data
"""
if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
# extend another list or set into a set
self._cache.update(inputItem)
elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
# add a simple object (integer, string, etc) to a set
self._cache.add(inputItem)
elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
# extend another list or set into a list
self._cache.extend(inputItem)
elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
# add a simple object (integer, string, etc) to a list
self._cache.append(inputItem)
elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
self._cache.update(inputItem)
else:
msg = "Input item type: %s cannot be added to a cache type: %s" % (type(self._cache), type(inputItem))
raise TypeError("Cache and input item data type mismatch. %s" % msg) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/MemoryCache.py | 0.815747 | 0.211254 | MemoryCache.py | pypi |
from __future__ import print_function, division, absolute_import
from builtins import object
import time
from datetime import tzinfo, timedelta
def timeFunction(func):
"""
source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
Decorator function to measure how long a method/function takes to run
It returns a tuple with:
* wall clock time spent
* returned result of the function
* the function name
"""
def wrapper(*arg, **kw):
t1 = time.time()
res = func(*arg, **kw)
t2 = time.time()
return round((t2 - t1), 4), res, func.__name__
return wrapper
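# Editorial usage sketch (not part of the original module): the timeFunction
# decorator applied to a hypothetical function, showing the
# (elapsed time, result, function name) tuple described in the docstring above.
@timeFunction
def _exampleSlowAdd(a, b):
    time.sleep(0.1)  # pretend to do some work
    return a + b
def _exampleTimeFunctionUsage():
    elapsed, result, funcName = _exampleSlowAdd(1, 2)
    print("%s returned %s after %s seconds" % (funcName, result, elapsed))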
class CodeTimer(object):
"""
A context manager for timing function calls.
Adapted from https://www.blog.pythonlibrary.org/2016/05/24/python-101-an-intro-to-benchmarking-your-code/
Use like
with CodeTimer(label='Doing something'):
do_something()
"""
def __init__(self, label='The function'):
self.start = time.time()
self.label = label
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
end = time.time()
runtime = end - self.start
msg = '{label} took {time} seconds to complete'
print(msg.format(label=self.label, time=runtime))
class LocalTimezone(tzinfo):
"""
A required python 2 class to determine current timezone for formatting rfc3339 timestamps
Required for sending alerts to the MONIT AlertManager
Can be removed once WMCore starts using python3
Details of class can be found at: https://docs.python.org/2/library/datetime.html#tzinfo-objects
"""
def __init__(self):
super(LocalTimezone, self).__init__()
self.ZERO = timedelta(0)
self.STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
self.DSTOFFSET = timedelta(seconds=-time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return self.ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0 | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/Timers.py | 0.695441 | 0.249539 | Timers.py | pypi |
from __future__ import (division, print_function)
from future.utils import viewitems
import copy
import unittest
class ExtendedUnitTestCase(unittest.TestCase):
"""
    A unittest.TestCase subclass providing additional assertion helpers,
    such as an order-insensitive comparison of nested data structures.
"""
def assertContentsEqual(self, expected_obj, actual_obj, msg=None):
"""
A nested object comparison without regard for the ordering of contents. It asserts that
expected_obj and actual_obj contain the same elements and that their sub-elements are the same.
However, all sequences are allowed to contain the same elements, but in different orders.
"""
def traverse_dict(dictionary):
for key, value in viewitems(dictionary):
if isinstance(value, dict):
traverse_dict(value)
elif isinstance(value, list):
traverse_list(value)
return
def get_dict_sortkey(x):
if isinstance(x, dict):
return list(x.keys())
else:
return x
def traverse_list(theList):
for value in theList:
if isinstance(value, dict):
traverse_dict(value)
elif isinstance(value, list):
traverse_list(value)
theList.sort(key=get_dict_sortkey)
return
if not isinstance(expected_obj, type(actual_obj)):
self.fail(msg="The two objects are different type and cannot be compared: %s and %s" % (
type(expected_obj), type(actual_obj)))
expected = copy.deepcopy(expected_obj)
actual = copy.deepcopy(actual_obj)
if isinstance(expected, dict):
traverse_dict(expected)
traverse_dict(actual)
elif isinstance(expected, list):
traverse_list(expected)
traverse_list(actual)
else:
self.fail(msg="The two objects are different type (%s) and cannot be compared." % type(expected_obj))
return self.assertEqual(expected, actual) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/ExtendedUnitTestCase.py | 0.765506 | 0.388328 | ExtendedUnitTestCase.py | pypi |
from __future__ import division, print_function
from future.utils import viewitems
from builtins import str, bytes
from past.builtins import basestring
import subprocess
import os
import re
import zlib
import base64
import sys
from types import ModuleType, FunctionType
from gc import get_referents
def lowerCmsHeaders(headers):
"""
Lower CMS headers in provided header's dict. The WMCore Authentication
code check only cms headers in lower case, e.g. cms-xxx-yyy.
"""
lheaders = {}
for hkey, hval in viewitems(headers): # perform lower-case
# lower header keys since we check lower-case in headers
if hkey.startswith('Cms-') or hkey.startswith('CMS-'):
lheaders[hkey.lower()] = hval
else:
lheaders[hkey] = hval
return lheaders
def makeList(stringList):
"""
_makeList_
Make a python list out of a comma separated list of strings,
throws a ValueError if the input is not well formed.
If the stringList is already of type list, then return it untouched.
"""
if isinstance(stringList, list):
return stringList
if isinstance(stringList, basestring):
toks = stringList.lstrip(' [').rstrip(' ]').split(',')
if toks == ['']:
return []
return [str(tok.strip(' \'"')) for tok in toks]
raise ValueError("Can't convert to list %s" % stringList)
def makeNonEmptyList(stringList):
"""
_makeNonEmptyList_
Given a string or a list of strings, return a non empty list of strings.
Throws an exception in case the final list is empty or input data is not
a string or a python list
"""
finalList = makeList(stringList)
if not finalList:
raise ValueError("Input data cannot be an empty list %s" % stringList)
return finalList
def strToBool(string):
"""
Try to convert different variations of True or False (including a string
type object) to a boolean value.
In short:
* True gets mapped from: True, "True", "true", "TRUE".
* False gets mapped from: False, "False", "false", "FALSE"
* anything else will fail
:param string: expects a boolean or a string, but it could be anything else
:return: a boolean value, or raise an exception if value passed in is not supported
"""
if string is False or string is True:
return string
elif string in ["True", "true", "TRUE"]:
return True
elif string in ["False", "false", "FALSE"]:
return False
raise ValueError("Can't convert to bool: %s" % string)
def safeStr(string):
"""
_safeStr_
Cast simple data (int, float, basestring) to string.
"""
if not isinstance(string, (tuple, list, set, dict)):
return str(string)
raise ValueError("We're not supposed to convert %s to string." % string)
def diskUse():
"""
This returns the % use of each disk partition
"""
diskPercent = []
df = subprocess.Popen(["df", "-klP"], stdout=subprocess.PIPE)
output = df.communicate()[0]
output = decodeBytesToUnicode(output).split("\n")
for x in output:
split = x.split()
if split != [] and split[0] != 'Filesystem':
diskPercent.append({'mounted': split[5], 'percent': split[4]})
return diskPercent
def numberCouchProcess():
"""
This returns the number of couch process
"""
ps = subprocess.Popen(["ps", "-ef"], stdout=subprocess.PIPE)
process = ps.communicate()[0]
process = decodeBytesToUnicode(process).count('couchjs')
return process
def rootUrlJoin(base, extend):
"""
Adds a path element to the path within a ROOT url
"""
if base:
match = re.match("^root://([^/]+)/(.+)", base)
if match:
host = match.group(1)
path = match.group(2)
newpath = os.path.join(path, extend)
newurl = "root://%s/%s" % (host, newpath)
return newurl
return None
def zipEncodeStr(message, maxLen=5120, compressLevel=9, steps=100, truncateIndicator=" (...)"):
"""
_zipEncodeStr_
Utility to zip a string and encode it.
If zipped encoded length is greater than maxLen,
truncate message until zip/encoded version
is within the limits allowed.
"""
message = encodeUnicodeToBytes(message)
encodedStr = zlib.compress(message, compressLevel)
encodedStr = base64.b64encode(encodedStr)
if len(encodedStr) < maxLen or maxLen == -1:
return encodedStr
compressRate = 1. * len(encodedStr) / len(base64.b64encode(message))
# Estimate new length for message zip/encoded version
# to be less than maxLen.
# Also, append truncate indicator to message.
truncateIndicator = encodeUnicodeToBytes(truncateIndicator)
strLen = int((maxLen - len(truncateIndicator)) / compressRate)
message = message[:strLen] + truncateIndicator
encodedStr = zipEncodeStr(message, maxLen=-1)
# If new length is not short enough, truncate
# recursively by steps
while len(encodedStr) > maxLen:
message = message[:-steps - len(truncateIndicator)] + truncateIndicator
encodedStr = zipEncodeStr(message, maxLen=-1)
return encodedStr
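# Editorial usage sketch (not part of the original module): a hypothetical
# round trip showing that, for messages below maxLen, zipEncodeStr produces a
# base64 string that can be decoded and decompressed back to the original text.
def _exampleZipEncodeStrUsage():
    message = "some job error message " * 10
    encoded = zipEncodeStr(message, maxLen=5120)
    decoded = zlib.decompress(base64.b64decode(encoded)).decode("utf-8")
    assert decoded == message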
def getSize(obj):
"""
_getSize_
Function to traverse an object and calculate its total size in bytes
:param obj: a python object
:return: an integer representing the total size of the object
Code extracted from Stack Overflow:
https://stackoverflow.com/questions/449560/how-do-i-determine-the-size-of-an-object-in-python
"""
# Custom objects know their class.
# Function objects seem to know way too much, including modules.
# Exclude modules as well.
BLACKLIST = type, ModuleType, FunctionType
if isinstance(obj, BLACKLIST):
raise TypeError('getSize() does not take argument of type: '+ str(type(obj)))
seen_ids = set()
size = 0
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, BLACKLIST) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
objects = get_referents(*need_referents)
return size
def decodeBytesToUnicode(value, errors="strict"):
"""
Accepts an input "value" of generic type.
If "value" is a string of type sequence of bytes (i.e. in py2 `str` or
`future.types.newbytes.newbytes`, in py3 `bytes`), then it is converted to
a sequence of unicode codepoints.
This function is useful for cleaning input data when using the
"unicode sandwich" approach, which involves converting bytes (i.e. strings
of type sequence of bytes) to unicode (i.e. strings of type sequence of
unicode codepoints, in py2 `unicode` or `future.types.newstr.newstr`,
    in py3 `str`) as soon as possible when receiving input data, and
converting unicode back to bytes as late as possible.
    Attention:
- converting unicode back to bytes is not covered by this function
- converting unicode back to bytes is not always necessary. when in doubt,
do not do it.
Reference: https://nedbatchelder.com/text/unipain.html
py2:
- "errors" can be: "strict", "ignore", "replace",
- ref: https://docs.python.org/2/howto/unicode.html#the-unicode-type
py3:
- "errors" can be: "strict", "ignore", "replace", "backslashreplace"
- ref: https://docs.python.org/3/howto/unicode.html#the-string-type
"""
if isinstance(value, bytes):
return value.decode("utf-8", errors)
return value
def decodeBytesToUnicodeConditional(value, errors="ignore", condition=True):
"""
if *condition*, then call decodeBytesToUnicode(*value*, *errors*),
else return *value*
This may be useful when we want to conditionally apply decodeBytesToUnicode,
maintaining brevity.
Parameters
----------
value : any
passed to decodeBytesToUnicode
errors: str
passed to decodeBytesToUnicode
    condition: boolean or object with attribute __bool__()
if True, then we run decodeBytesToUnicode. Usually PY2/PY3
"""
if condition:
return decodeBytesToUnicode(value, errors)
return value
def encodeUnicodeToBytes(value, errors="strict"):
"""
Accepts an input "value" of generic type.
If "value" is a string of type sequence of unicode (i.e. in py2 `unicode` or
`future.types.newstr.newstr`, in py3 `str`), then it is converted to
a sequence of bytes.
This function is useful for encoding output data when using the
"unicode sandwich" approach, which involves converting unicode (i.e. strings
of type sequence of unicode codepoints) to bytes (i.e. strings of type
sequence of bytes, in py2 `str` or `future.types.newbytes.newbytes`,
in py3 `bytes`) as late as possible when passing a string to a third-party
function that only accepts bytes as input (pycurl's curl.setop is an
example).
py2:
- "errors" can be: "strict", "ignore", "replace", "xmlcharrefreplace"
- ref: https://docs.python.org/2/howto/unicode.html#the-unicode-type
py3:
- "errors" can be: "strict", "ignore", "replace", "backslashreplace",
"xmlcharrefreplace", "namereplace"
- ref: https://docs.python.org/3/howto/unicode.html#the-string-type
"""
if isinstance(value, str):
return value.encode("utf-8", errors)
return value
def encodeUnicodeToBytesConditional(value, errors="ignore", condition=True):
"""
if *condition*, then call encodeUnicodeToBytes(*value*, *errors*),
else return *value*
This may be useful when we want to conditionally apply encodeUnicodeToBytes,
maintaining brevity.
Parameters
----------
value : any
passed to encodeUnicodeToBytes
errors: str
passed to encodeUnicodeToBytes
    condition: boolean or object with attribute __bool__()
if True, then we run encodeUnicodeToBytes. Usually PY2/PY3
"""
if condition:
return encodeUnicodeToBytes(value, errors)
return value | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/Utils/Utilities.py | 0.605216 | 0.272191 | Utilities.py | pypi |
from __future__ import print_function
from subprocess import Popen, PIPE
from Utils.PythonVersion import PY3
from WMCore.Storage.StageOutError import StageOutError
def runCommand(command):
"""
_runCommand_
    Run the command without deadlocking stdout and stderr.
    Returns the exitCode
"""
# capture stdout and stderr from command
if PY3:
# python2 pylint complains about `encoding` argument
child = Popen(command, shell=True, bufsize=1, stdin=PIPE, close_fds=True, encoding='utf8')
else:
child = Popen(command, shell=True, bufsize=1, stdin=PIPE, close_fds=True)
child.communicate()
retCode = child.returncode
return retCode
def runCommandWithOutput(command):
"""
_runCommandWithOutput_
    Run the command without deadlocking stdout and stderr,
    capturing all output from stdout and stderr.
    Returns the exitCode and a string containing stdout & stderr
"""
# capture stdout and stderr from command
if PY3:
# python2 pylint complains about `encoding` argument
child = Popen(command, shell=True, bufsize=1, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True, encoding='utf8')
else:
child = Popen(command, shell=True, bufsize=1, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
sout, serr = child.communicate()
retCode = child.returncode
# If child is terminated by signal, err will be negative value. (Unix only)
sigStr = "Terminated by signal %s\n" % -retCode if retCode < 0 else ""
output = "%sstdout: %s\nstderr: %s" % (sigStr, sout, serr)
return retCode, output
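# Editorial usage sketch (not part of the original module): runs a harmless,
# made-up shell command through runCommandWithOutput and shows the
# (exit code, combined output) tuple it returns.
def _exampleRunCommandWithOutput():
    exitCode, output = runCommandWithOutput("echo 'hello from WMCore'")
    print("exit code: %s" % exitCode)
    print(output)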
def execute(command):
"""
_execute_
Execute the command provided, throw a StageOutError if it exits
non zero
"""
try:
exitCode, output = runCommandWithOutput(command)
msg = "Command exited with status: %s, Output: (%s)" % (exitCode, output)
print(msg)
except Exception as ex:
msg = "Command threw exception: %s" % str(ex)
print("ERROR: Exception During Stage Out:\n%s" % msg)
raise StageOutError(msg, Command=command, ExitCode=60311)
if exitCode:
msg = "Command exited non-zero: ExitCode:%s \nOutput (%s)" % (exitCode, output)
print("ERROR: Exception During Stage Out:\n%s" % msg)
raise StageOutError(msg, Command=command, ExitCode=exitCode)
return | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Storage/Execute.py | 0.579638 | 0.150809 | Execute.py | pypi |
from builtins import next, str, range
from future.utils import viewitems
from future import standard_library
standard_library.install_aliases()
import os
import re
from urllib.parse import urlsplit
from xml.dom.minidom import Document
from WMCore.Algorithms.ParseXMLFile import xmlFileToNode
_TFCArgSplit = re.compile("\?protocol=")
class TrivialFileCatalog(dict):
"""
_TrivialFileCatalog_
Object that can map LFNs to PFNs based on contents of a Trivial
File Catalog
"""
def __init__(self):
dict.__init__(self)
self['lfn-to-pfn'] = []
self['pfn-to-lfn'] = []
self.preferredProtocol = None # attribute for preferred protocol
def addMapping(self, protocol, match, result,
chain=None, mapping_type='lfn-to-pfn'):
"""
_addMapping_
Add an lfn to pfn mapping to this instance
"""
entry = {}
entry.setdefault("protocol", protocol)
entry.setdefault("path-match-expr", re.compile(match))
entry.setdefault("path-match", match)
entry.setdefault("result", result)
entry.setdefault("chain", chain)
self[mapping_type].append(entry)
def _doMatch(self, protocol, path, style, caller):
"""
Generalised way of building up the mappings.
        caller is the method from which this method was called; it is used
for resolving chained rules
Return None if no match
"""
for mapping in self[style]:
if mapping['protocol'] != protocol:
continue
if mapping['path-match-expr'].match(path) or mapping["chain"] != None:
if mapping["chain"] != None:
oldpath = path
path = caller(mapping["chain"], path)
if not path:
continue
splitList = []
if len(mapping['path-match-expr'].split(path, 1)) > 1:
for split in range(len(mapping['path-match-expr'].split(path, 1))):
s = mapping['path-match-expr'].split(path, 1)[split]
if s:
splitList.append(s)
else:
path = oldpath
continue
result = mapping['result']
for split in range(len(splitList)):
result = result.replace("$" + str(split + 1), splitList[split])
return result
return None
def matchLFN(self, protocol, lfn):
"""
_matchLFN_
Return the result for the LFN provided if the LFN
matches the path-match for that protocol
Return None if no match
"""
result = self._doMatch(protocol, lfn, "lfn-to-pfn", self.matchLFN)
return result
def matchPFN(self, protocol, pfn):
"""
        _matchPFN_
        Return the result for the PFN provided if the PFN
        matches the path-match for that protocol
Return None if no match
"""
result = self._doMatch(protocol, pfn, "pfn-to-lfn", self.matchPFN)
return result
def getXML(self):
"""
Converts TFC implementation (dict) into a XML string representation.
The method reflects this class implementation - dictionary containing
list of mappings while each mapping (i.e. entry, see addMapping
method) is a dictionary of key, value pairs.
"""
def _getElementForMappingEntry(entry, mappingStyle):
xmlDocTmp = Document()
element = xmlDocTmp.createElement(mappingStyle)
for k, v in viewitems(entry):
# ignore empty, None or compiled regexp items into output
if not v or (k == "path-match-expr"):
continue
element.setAttribute(k, str(v))
return element
xmlDoc = Document()
root = xmlDoc.createElement("storage-mapping") # root element name
for mappingStyle, mappings in viewitems(self):
for mapping in mappings:
mapElem = _getElementForMappingEntry(mapping, mappingStyle)
root.appendChild(mapElem)
return root.toprettyxml()
def __str__(self):
result = ""
for mapping in ['lfn-to-pfn', 'pfn-to-lfn']:
for item in self[mapping]:
result += "\t%s: protocol=%s path-match-re=%s result=%s" % (
mapping,
item['protocol'],
item['path-match-expr'].pattern,
item['result'])
if item['chain'] != None:
result += " chain=%s" % item['chain']
result += "\n"
return result
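# Editorial usage sketch (not part of the original module): a hand-built
# catalog with a single hypothetical lfn-to-pfn rule, illustrating addMapping
# and matchLFN. The protocol name, regular expression and result template are
# all made up.
def _exampleTrivialFileCatalogUsage():
    tfc = TrivialFileCatalog()
    tfc.addMapping("xrootd", "/+store/(.*)", "root://cms-xrd.example.org//store/$1")
    return tfc.matchLFN("xrootd", "/store/data/file.root")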
def tfcProtocol(contactString):
"""
_tfcProtocol_
Given a Trivial File Catalog contact string, extract the
protocol from it.
"""
args = urlsplit(contactString)[3]
value = args.replace("protocol=", '')
return value
def tfcFilename(contactString):
"""
_tfcFilename_
Extract the filename from a TFC contact string.
"""
value = contactString.replace("trivialcatalog_file:", "")
value = _TFCArgSplit.split(value)[0]
path = os.path.normpath(value)
return path
def readTFC(filename):
"""
_readTFC_
Read the file provided and return a TrivialFileCatalog
instance containing the details found in it
"""
if not os.path.exists(filename):
msg = "TrivialFileCatalog not found: %s" % filename
raise RuntimeError(msg)
try:
node = xmlFileToNode(filename)
except Exception as ex:
msg = "Error reading TrivialFileCatalog: %s\n" % filename
msg += str(ex)
raise RuntimeError(msg)
parsedResult = nodeReader(node)
tfcInstance = TrivialFileCatalog()
for mapping in ['lfn-to-pfn', 'pfn-to-lfn']:
for entry in parsedResult[mapping]:
protocol = entry.get("protocol", None)
match = entry.get("path-match", None)
result = entry.get("result", None)
chain = entry.get("chain", None)
            if protocol is None or match is None:
continue
tfcInstance.addMapping(str(protocol), str(match), str(result), chain, mapping)
return tfcInstance
def loadTFC(contactString):
"""
_loadTFC_
Given the contact string for the tfc, parse out the file location
and the protocol and create a TFC instance
"""
protocol = tfcProtocol(contactString)
catalog = tfcFilename(contactString)
instance = readTFC(catalog)
instance.preferredProtocol = protocol
return instance
def coroutine(func):
"""
_coroutine_
Decorator method used to prime coroutines
"""
def start(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start
def nodeReader(node):
"""
_nodeReader_
    Given a storage-mapping node, extract the lfn-to-pfn and pfn-to-lfn mappings it contains
"""
processLfnPfn = {
'path-match': processPathMatch(),
'protocol': processProtocol(),
'result': processResult(),
'chain': processChain()
}
report = {'lfn-to-pfn': [], 'pfn-to-lfn': []}
processSMT = processSMType(processLfnPfn)
processor = expandPhEDExNode(processStorageMapping(processSMT))
processor.send((report, node))
return report
@coroutine
def expandPhEDExNode(target):
"""
_expandPhEDExNode_
    If pulling a TFC from the PhEDEx DS, it's wrapped in a top level <phedex> node,
this routine handles that extra node if it exists
"""
while True:
report, node = (yield)
sentPhedex = False
for subnode in node.children:
if subnode.name == "phedex":
target.send((report, subnode))
sentPhedex = True
if not sentPhedex:
target.send((report, node))
@coroutine
def processStorageMapping(target):
"""
    Forward any storage-mapping child nodes of the document to the next stage
"""
while True:
report, node = (yield)
for subnode in node.children:
if subnode.name == 'storage-mapping':
target.send((report, subnode))
@coroutine
def processSMType(targets):
"""
Process the type of storage-mapping
"""
while True:
report, node = (yield)
for subnode in node.children:
if subnode.name in ['lfn-to-pfn', 'pfn-to-lfn']:
tmpReport = {'path-match-expr': subnode.name}
targets['protocol'].send((tmpReport, subnode.attrs.get('protocol', None)))
targets['path-match'].send((tmpReport, subnode.attrs.get('path-match', None)))
targets['result'].send((tmpReport, subnode.attrs.get('result', None)))
targets['chain'].send((tmpReport, subnode.attrs.get('chain', None)))
report[subnode.name].append(tmpReport)
@coroutine
def processPathMatch():
"""
Process path-match
"""
while True:
report, value = (yield)
report['path-match'] = value
@coroutine
def processProtocol():
"""
Process protocol
"""
while True:
report, value = (yield)
report['protocol'] = value
@coroutine
def processResult():
"""
Process result
"""
while True:
report, value = (yield)
report['result'] = value
@coroutine
def processChain():
"""
Process chain
"""
while True:
report, value = (yield)
if value == "":
report['chain'] = None
else:
report['chain'] = value | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Storage/TrivialFileCatalog.py | 0.637257 | 0.167253 | TrivialFileCatalog.py | pypi |
import json
import urllib
from urllib.parse import urlparse, parse_qs, quote_plus
from collections import defaultdict
from Utils.CertTools import cert, ckey
from dbs.apis.dbsClient import aggFileLumis, aggFileParents
from WMCore.Services.pycurl_manager import getdata as multi_getdata
from Utils.PortForward import PortForward
def dbsListFileParents(dbsUrl, blocks):
"""
    Concurrent counterpart of DBS listFileParents API
:param dbsUrl: DBS URL
:param blocks: list of blocks
:return: list of file parents
"""
urls = ['%s/fileparents?block_name=%s' % (dbsUrl, quote_plus(b)) for b in blocks]
func = aggFileParents
uKey = 'block_name'
return getUrls(urls, func, uKey)
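# Editorial usage sketch (not part of the original module): a hypothetical call
# to dbsListFileParents. The DBS instance URL and block name are placeholders
# and a real call requires valid grid credentials (cert/key) and network access.
def _exampleDbsListFileParents():
    dbsUrl = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader"
    blocks = ["/Primary/Processed-v1/TIER#00000000-0000-0000-0000-000000000000"]
    return dbsListFileParents(dbsUrl, blocks)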
def dbsListFileLumis(dbsUrl, blocks):
"""
    Concurrent counterpart of DBS listFileLumis API
:param dbsUrl: DBS URL
:param blocks: list of blocks
:return: list of file lumis
"""
urls = ['%s/filelumis?block_name=%s' % (dbsUrl, quote_plus(b)) for b in blocks]
func = aggFileLumis
uKey = 'block_name'
return getUrls(urls, func, uKey)
def dbsBlockOrigin(dbsUrl, blocks):
"""
    Concurrent counterpart of DBS blockorigin API
:param dbsUrl: DBS URL
:param blocks: list of blocks
    :return: block origin information for the given blocks
"""
urls = ['%s/blockorigin?block_name=%s' % (dbsUrl, quote_plus(b)) for b in blocks]
func = None
uKey = 'block_name'
return getUrls(urls, func, uKey)
def dbsParentFilesGivenParentDataset(dbsUrl, parentDataset, fInfo):
"""
Obtain parent files for given fileInfo object
:param dbsUrl: DBS URL
:param parentDataset: parent dataset name
:param fInfo: file info object
:return: list of parent files for given file info object
"""
portForwarder = PortForward(8443)
urls = []
for fileInfo in fInfo:
run = fileInfo['run_num']
lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
url = f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}'
urls.append(portForwarder(url))
func = None
uKey = None
rdict = getUrls(urls, func, uKey)
parentFiles = defaultdict(set)
for fileInfo in fInfo:
run = fileInfo['run_num']
lumis = urllib.parse.quote_plus(str(fileInfo['lumi_section_num']))
url = f'{dbsUrl}/files?dataset={parentDataset}&run_num={run}&lumi_list={lumis}'
url = portForwarder(url)
if url in rdict:
pFileList = rdict[url]
pFiles = {x['logical_file_name'] for x in pFileList}
parentFiles[fileInfo['logical_file_name']] = \
parentFiles[fileInfo['logical_file_name']].union(pFiles)
return parentFiles
def getUrls(urls, aggFunc, uKey=None):
"""
Perform parallel DBS calls for given set of urls and apply given aggregation
function to the results.
:param urls: list of DBS urls to call
:param aggFunc: aggregation function
:param uKey: url parameter to use for final dictionary
    :return: dictionary of results where keys are urls and values are obtained results
"""
data = multi_getdata(urls, ckey(), cert())
rdict = {}
for row in data:
url = row['url']
code = int(row.get('code', 200))
error = row.get('error')
if code != 200:
msg = f"Fail to query {url}. Error: {code} {error}"
raise RuntimeError(msg)
if uKey:
key = urlParams(url).get(uKey)
else:
key = url
data = row.get('data', [])
res = json.loads(data)
if aggFunc:
rdict[key] = aggFunc(res)
else:
rdict[key] = res
return rdict
def urlParams(url):
"""
Return dictionary of URL parameters
:param url: URL link
:return: dictionary of URL parameters
"""
parsedUrl = urlparse(url)
rdict = parse_qs(parsedUrl.query)
for key, vals in rdict.items():
if len(vals) == 1:
rdict[key] = vals[0]
return rdict | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Services/DBS/DBSUtils.py | 0.572484 | 0.162746 | DBSUtils.py | pypi |
from __future__ import (division, print_function)
from builtins import str, bytes
from Utils.Utilities import encodeUnicodeToBytes
from io import BytesIO
import re
import xml.etree.cElementTree as ET
int_number_pattern = re.compile(r'(^[0-9-]$|^[0-9-][0-9]*$)')
float_number_pattern = re.compile(r'(^[-]?\d+\.\d*$|^\d*\.{1,1}\d+$)')
def adjust_value(value):
"""
Change null value to None.
"""
pat_float = float_number_pattern
pat_integer = int_number_pattern
if isinstance(value, str):
if value == 'null' or value == '(null)':
return None
elif pat_float.match(value):
return float(value)
elif pat_integer.match(value):
return int(value)
else:
return value
else:
return value
def xml_parser(data, prim_key):
"""
Generic XML parser
:param data: can be of type "file object", unicode string or bytes string
"""
if isinstance(data, (str, bytes)):
stream = BytesIO()
data = encodeUnicodeToBytes(data, "ignore")
stream.write(data)
stream.seek(0)
else:
stream = data
context = ET.iterparse(stream)
for event, elem in context:
row = {}
key = elem.tag
if key != prim_key:
continue
row[key] = elem.attrib
get_children(elem, event, row, key)
elem.clear()
yield row
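# Editorial usage sketch (not part of the original module): a tiny, made-up XML
# document parsed with xml_parser, keyed on the "row" tag. It only illustrates
# that the generator yields one dictionary per matching primary-key element.
def _exampleXmlParserUsage():
    payload = '<data><row id="1"><name>alpha</name></row></data>'
    return list(xml_parser(payload, "row"))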
def get_children(elem, event, row, key):
"""
    xml_parser helper function. It recursively collects information about
    the children of the given element tag. The information is stored into
    the provided row under the given key.
"""
for child in elem.getchildren():
child_key = child.tag
child_data = child.attrib
if not child_data:
child_dict = adjust_value(child.text)
else:
child_dict = child_data
if child.getchildren(): # we got grand-children
if child_dict:
row[key][child_key] = child_dict
else:
row[key][child_key] = {}
if isinstance(child_dict, dict):
newdict = {child_key: child_dict}
else:
newdict = {child_key: {}}
get_children(child, event, newdict, child_key)
row[key][child_key] = newdict[child_key]
else:
if not isinstance(row[key], dict):
row[key] = {}
row[key].setdefault(child_key, [])
row[key][child_key].append(child_dict)
child.clear() | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Services/TagCollector/XMLUtils.py | 0.567697 | 0.201794 | XMLUtils.py | pypi |
from __future__ import division
from builtins import object
from datetime import timedelta, datetime
import socket
import json
import logging
from WMCore.Services.pycurl_manager import RequestHandler
from Utils.Timers import LocalTimezone
class AlertManagerAPI(object):
"""
A class used to send alerts via the MONIT AlertManager API
"""
def __init__(self, alertManagerUrl, logger=None):
self.alertManagerUrl = alertManagerUrl
# sender's hostname is added as an annotation
self.hostname = socket.gethostname()
self.mgr = RequestHandler()
self.ltz = LocalTimezone()
self.headers = {"Content-Type": "application/json"}
self.validSeverity = ["high", "medium", "low"]
self.logger = logger if logger else logging.getLogger()
def sendAlert(self, alertName, severity, summary, description, service, tag="wmcore", endSecs=600, generatorURL=""):
"""
:param alertName: a unique name for the alert
:param severity: low, medium, high
:param summary: a short description of the alert
:param description: a longer informational message with details about the alert
:param service: the name of the service firing an alert
:param tag: a unique tag used to help route the alert
        :param endSecs: how many seconds until the alert expires
:param generatorURL: this URL will be sent to AlertManager and configured as a clickable "Source" link in the web interface
AlertManager JSON format reference: https://www.prometheus.io/docs/alerting/latest/clients/
[
{
"labels": {
"alertname": "<requiredAlertName>",
"<labelname>": "<labelvalue>",
...
},
"annotations": {
"<labelname>": "<labelvalue>",
...
},
"startsAt": "<rfc3339>", # optional, will be current time if not present
"endsAt": "<rfc3339>",
"generatorURL": "<generator_url>" # optional
},
]
"""
if not self._isValidSeverity(severity):
return False
request = []
alert = {}
labels = {}
annotations = {}
# add labels
labels["alertname"] = alertName
labels["severity"] = severity
labels["tag"] = tag
labels["service"] = service
alert["labels"] = labels
# add annotations
annotations["hostname"] = self.hostname
annotations["summary"] = summary
annotations["description"] = description
alert["annotations"] = annotations
# In python3 we won't need the LocalTimezone class
# Will change to d = datetime.now().astimezone() + timedelta(seconds=endSecs)
d = datetime.now(self.ltz) + timedelta(seconds=endSecs)
alert["endsAt"] = d.isoformat("T")
alert["generatorURL"] = generatorURL
request.append(alert)
# need to do this because pycurl_manager only accepts dict and encoded strings type
params = json.dumps(request)
res = self.mgr.getdata(self.alertManagerUrl, params=params, headers=self.headers, verb='POST')
return res
def _isValidSeverity(self, severity):
"""
Used to check if the severity of the alert matches the valid levels: low, medium, high
:param severity: severity of the alert
:return: True or False
"""
if severity not in self.validSeverity:
logging.critical("Alert submitted to AlertManagerAPI with invalid severity: %s", severity)
return False
return True | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Services/AlertManager/AlertManagerAPI.py | 0.810554 | 0.161849 | AlertManagerAPI.py | pypi |
from __future__ import division
from builtins import object
import os.path
from WMCore.DataStructs.File import File
from WMCore.FwkJobReport import Report
from WMCore.Services.UUIDLib import makeUUID
class ReportEmu(object):
"""
_ReportEmu_
Job Report Emulator that creates a Report given a WMTask/WMStep and a Job instance.
"""
def __init__(self, **options):
"""
___init___
Options contain the settings for producing the report instance from the provided step
"""
self.step = options.get("WMStep", None)
self.job = options.get("Job", None)
return
def addInputFilesToReport(self, report):
"""
_addInputFilesToReport_
Pull all of the input files out of the job and add them to the report.
"""
report.addInputSource("PoolSource")
for inputFile in self.job["input_files"]:
inputFileSection = report.addInputFile("PoolSource", lfn=inputFile["lfn"],
size=inputFile["size"],
events=inputFile["events"])
Report.addRunInfoToFile(inputFileSection, inputFile["runs"])
return
def determineOutputSize(self):
"""
_determineOutputSize_
Determine the total size of and number of events in the input files and
use the job mask to scale that to something that would reasonably
approximate the size of and number of events in the output.
"""
totalSize = 0
totalEvents = 0
for inputFile in self.job["input_files"]:
totalSize += inputFile["size"]
totalEvents += inputFile["events"]
if self.job["mask"]["FirstEvent"] is not None and \
self.job["mask"]["LastEvent"] is not None:
outputTotalEvents = self.job["mask"]["LastEvent"] - self.job["mask"]["FirstEvent"] + 1
else:
outputTotalEvents = totalEvents
outputSize = int(totalSize * outputTotalEvents / totalEvents )
return (outputSize, outputTotalEvents)
def addOutputFilesToReport(self, report):
"""
_addOutputFilesToReport_
Add output files to every output module in the step. Scale the size
and number of events in the output files appropriately.
"""
(outputSize, outputEvents) = self.determineOutputSize()
if not os.path.exists('ReportEmuTestFile.txt'):
with open('ReportEmuTestFile.txt', 'w') as f:
f.write('A Shubbery')
for outputModuleName in self.step.listOutputModules():
outputModuleSection = self.step.getOutputModule(outputModuleName)
outputModuleSection.fixedLFN = False
outputModuleSection.disableGUID = False
outputLFN = "%s/%s.root" % (outputModuleSection.lfnBase,
str(makeUUID()))
outputFile = File(lfn=outputLFN, size=outputSize, events=outputEvents,
merged=False)
outputFile.setLocation(self.job["location"])
outputFile['pfn'] = "ReportEmuTestFile.txt"
outputFile['guid'] = "ThisIsGUID"
outputFile["checksums"] = {"adler32": "1234", "cksum": "5678"}
outputFile["dataset"] = {"primaryDataset": outputModuleSection.primaryDataset,
"processedDataset": outputModuleSection.processedDataset,
"dataTier": outputModuleSection.dataTier,
"applicationName": "cmsRun",
"applicationVersion": self.step.getCMSSWVersion()}
outputFile["module_label"] = outputModuleName
outputFileSection = report.addOutputFile(outputModuleName, outputFile)
for inputFile in self.job["input_files"]:
Report.addRunInfoToFile(outputFileSection, inputFile["runs"])
return
def __call__(self):
report = Report.Report(self.step.name())
report.id = self.job["id"]
report.task = self.job["task"]
report.workload = None
self.addInputFilesToReport(report)
self.addOutputFilesToReport(report)
return report | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/FwkJobReport/ReportEmu.py | 0.624294 | 0.164483 | ReportEmu.py | pypi |
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class DBCreator(DBFormatter):
"""
_DBCreator_
Generic class for creating database tables.
"""
def __init__(self, logger, dbinterface):
"""
_init_
Call the constructor of the parent class and create empty dictionaries
to hold table create statements, constraint statements and insert
statements.
"""
DBFormatter.__init__(self, logger, dbinterface)
self.create = {}
self.constraints = {}
self.inserts = {}
self.indexes = {}
def execute(self, conn = None, transaction = False):
"""
_execute_
        Generic method to create tables and constraints by executing the
        SQL statements in the create and constraints dictionaries.
        Before execution the keys assigned to the tables in the self.create
        dictionary are sorted, to offer the possibility of executing
        table creation in a certain order.
"""
# create tables
for i in sorted(self.create.keys()):
try:
self.dbi.processData(self.create[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.create[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
# create indexes
for i in self.indexes:
try:
self.dbi.processData(self.indexes[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.indexes[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
# set constraints
for i in self.constraints:
try:
self.dbi.processData(self.constraints[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.constraints[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
# insert permanent data
for i in self.inserts:
try:
self.dbi.processData(self.inserts[i],
conn = conn,
transaction = transaction)
except Exception as e:
msg = WMEXCEPTION['WMCORE-2'] + '\n\n' +\
str(self.inserts[i]) +'\n\n' +str(e)
self.logger.debug( msg )
raise WMException(msg,'WMCORE-2')
return True
def __str__(self):
"""
_str_
Return a well formatted text representation of the schema held in the
self.create, self.constraints, self.inserts, self.indexes dictionaries.
"""
string = ''
for i in self.create, self.constraints, self.inserts, self.indexes:
for j in i:
string = string + i[j].lstrip() + '\n'
return string | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Database/DBCreator.py | 0.526586 | 0.233969 | DBCreator.py | pypi |
import logging
import time
from WMCore.DataStructs.WMObject import WMObject
from WMCore.WMException import WMException
from WMCore.WMExceptions import WMEXCEPTION
class Transaction(WMObject):
dbi = None
def __init__(self, dbinterface = None):
"""
Get the connection from the DBInterface and open a new transaction on it
"""
self.dbi = dbinterface
self.conn = None
self.transaction = None
def begin(self):
if self.conn == None:
self.conn = self.dbi.connection()
if self.conn.closed:
self.conn = self.dbi.connection()
if self.transaction == None:
self.transaction = self.conn.begin()
return
def processData(self, sql, binds={}):
"""
Propagates the request to the proper dbcore backend,
and performs checks for lost (or closed) connection.
"""
result = self.dbi.processData(sql, binds, conn = self.conn,
transaction = True)
return result
def commit(self):
"""
Commit the transaction and return the connection to the pool
"""
if not self.transaction == None:
self.transaction.commit()
if not self.conn == None:
self.conn.close()
self.conn = None
self.transaction = None
def rollback(self):
"""
To be called if there is an exception and you want to roll back the
transaction and return the connection to the pool
"""
if self.transaction:
self.transaction.rollback()
if self.conn:
self.conn.close()
self.conn = None
self.transaction = None
return
def rollbackForError(self):
"""
        This is called when handling a major exception. Sometimes you can end up
        in a situation where the transaction appears open, but is not. In this
        case, calling a rollback on the transaction would raise an exception,
        which would then break the logging and shutdown of the calling code.
Use only in components.
"""
try:
self.rollback()
except:
pass
return | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Database/Transaction.py | 0.487063 | 0.150809 | Transaction.py | pypi |
from copy import copy
from Utils.IteratorTools import grouper
import WMCore.WMLogging
from WMCore.DataStructs.WMObject import WMObject
from WMCore.Database.ResultSet import ResultSet
class DBInterface(WMObject):
"""
Base class for doing SQL operations using a SQLAlchemy engine, or
    pre-existing connection.
processData will take a (list of) sql statements and a (list of)
bind variable dictionaries and run the statements on the DB. If
necessary it will substitute binds into the sql (MySQL).
TODO:
Add in some suitable exceptions in one or two places
Test the hell out of it
Support executemany()
"""
logger = None
engine = None
def __init__(self, logger, engine):
self.logger = logger
self.logger.info ("Instantiating base WM DBInterface")
self.engine = engine
self.maxBindsPerQuery = 500
def buildbinds(self, sequence, thename, therest=[{}]):
"""
Build a list of binds. Can be used recursively, e.g.:
buildbinds(file, 'file', buildbinds(pnn, 'location'), {'lumi':123})
TODO: replace with an appropriate map function
"""
binds = []
for r in sequence:
for i in self.makelist(therest):
thebind = copy(i)
thebind[thename] = r
binds.append(thebind)
return binds
def executebinds(self, s=None, b=None, connection=None,
returnCursor=False):
"""
_executebinds_
returns a list of sqlalchemy.engine.base.ResultProxy objects
"""
if b == None:
resultProxy = connection.execute(s)
else:
resultProxy = connection.execute(s, b)
if returnCursor:
return resultProxy
result = ResultSet()
result.add(resultProxy)
resultProxy.close()
return result
def executemanybinds(self, s=None, b=None, connection=None,
returnCursor=False):
"""
_executemanybinds_
b is a list of dictionaries for the binds, e.g.:
b = [ {'bind1':'value1a', 'bind2': 'value2a'},
{'bind1':'value1b', 'bind2': 'value2b'} ]
see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/
Can't executemany() selects - so do each combination of binds here instead.
This will return a list of sqlalchemy.engine.base.ResultProxy object's
one for each set of binds.
returns a list of sqlalchemy.engine.base.ResultProxy objects
"""
s = s.strip()
if s.lower().endswith('select', 0, 6):
"""
Trying to select many
"""
if returnCursor:
result = []
for bind in b:
result.append(connection.execute(s, bind))
else:
result = ResultSet()
for bind in b:
resultproxy = connection.execute(s, bind)
result.add(resultproxy)
resultproxy.close()
return self.makelist(result)
"""
Now inserting or updating many
"""
result = connection.execute(s, b)
return self.makelist(result)
def connection(self):
"""
Return a connection to the engine (from the connection pool)
"""
return self.engine.connect()
def processData(self, sqlstmt, binds={}, conn=None,
transaction=False, returnCursor=False):
"""
set conn if you already have an active connection to reuse
set transaction = True if you already have an active transaction
"""
connection = None
try:
if not conn:
connection = self.connection()
else:
connection = conn
result = []
# Can take either a single statement or a list of statements and binds
sqlstmt = self.makelist(sqlstmt)
binds = self.makelist(binds)
if len(sqlstmt) > 0 and (len(binds) == 0 or (binds[0] == {} or binds[0] == None)):
# Should only be run by create statements
if not transaction:
#WMCore.WMLogging.sqldebug("transaction created in DBInterface")
trans = connection.begin()
for i in sqlstmt:
r = self.executebinds(i, connection=connection,
returnCursor=returnCursor)
result.append(r)
if not transaction:
trans.commit()
elif len(binds) > len(sqlstmt) and len(sqlstmt) == 1:
#Run single SQL statement for a list of binds - use execute_many()
if not transaction:
trans = connection.begin()
for subBinds in grouper(binds, self.maxBindsPerQuery):
result.extend(self.executemanybinds(sqlstmt[0], subBinds,
connection=connection, returnCursor=returnCursor))
if not transaction:
trans.commit()
elif len(binds) == len(sqlstmt):
# Run a list of SQL for a list of binds
if not transaction:
trans = connection.begin()
for i, s in enumerate(sqlstmt):
b = binds[i]
r = self.executebinds(s, b, connection=connection,
returnCursor=returnCursor)
result.append(r)
if not transaction:
trans.commit()
else:
self.logger.exception(
"DBInterface.processData Nothing executed, problem with your arguments")
self.logger.exception(
"DBInterface.processData SQL = %s" % sqlstmt)
WMCore.WMLogging.sqldebug('DBInterface.processData sql is %s items long' % len(sqlstmt))
WMCore.WMLogging.sqldebug('DBInterface.processData binds are %s items long' % len(binds))
assert_value = False
if len(binds) == len(sqlstmt):
assert_value = True
WMCore.WMLogging.sqldebug('DBInterface.processData are binds and sql same length? : %s' % (assert_value))
WMCore.WMLogging.sqldebug('sql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
(sqlstmt, binds, connection, transaction))
WMCore.WMLogging.sqldebug('type check:\nsql: %s\n binds: %s\n, connection:%s\n, transaction:%s\n' %
(type(sqlstmt), type(binds), type(connection), type(transaction)))
raise Exception("""DBInterface.processData Nothing executed, problem with your arguments
Probably mismatched sizes for sql (%i) and binds (%i)""" % (len(sqlstmt), len(binds)))
finally:
if not conn and connection != None:
connection.close() # Return connection to the pool
return result | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Database/DBCore.py | 0.414069 | 0.245401 | DBCore.py | pypi |
import copy
from WMCore.Database.DBCore import DBInterface
from WMCore.Database.ResultSet import ResultSet
def bindVarCompare(a):
"""
_bindVarCompare_
Bind variables are represented as a tuple with the first element being the
variable name and the second being it's position in the query. We sort on
the position in the query.
"""
return a[1]
def stringLengthCompare(a):
"""
_stringLengthCompare_
Sort comparison function to sort strings by length.
Since we want to sort from longest to shortest, this must be reversed when used
"""
return len(a)
class MySQLInterface(DBInterface):
def substitute(self, origSQL, origBindsList):
"""
_substitute_
Transform as set of bind variables from a list of dictionaries to a list
of tuples:
b = [ {'bind1':'value1a', 'bind2': 'value2a'},
{'bind1':'value1b', 'bind2': 'value2b'} ]
Will be transformed into:
b = [ ('value1a', 'value2a'), ('value1b', 'value2b')]
Don't need to substitute in the binds as executemany does that
internally. But the sql will also need to be reformatted, such that
:bind_name becomes %s.
See: http://www.devshed.com/c/a/Python/MySQL-Connectivity-With-Python/5/
"""
if origBindsList == None:
return origSQL, None
origBindsList = self.makelist(origBindsList)
origBind = origBindsList[0]
bindVarPositionList = []
updatedSQL = copy.copy(origSQL)
# We process bind variables from longest to shortest to avoid a shorter
# bind variable matching a longer one. For example if we have two bind
# variables: RELEASE_VERSION and RELEASE_VERSION_ID the former will
# match against the latter, causing problems. We'll sort the variable
# names by length to guard against this.
bindVarNames = list(origBind)
bindVarNames.sort(key=stringLengthCompare, reverse=True)
bindPositions = {}
for bindName in bindVarNames:
searchPosition = 0
while True:
bindPosition = origSQL.lower().find(":%s" % bindName.lower(),
searchPosition)
if bindPosition == -1:
break
if bindPosition not in bindPositions:
bindPositions[bindPosition] = 0
bindVarPositionList.append((bindName, bindPosition))
searchPosition = bindPosition + 1
searchPosition = 0
while True:
bindPosition = updatedSQL.lower().find(":%s" % bindName.lower(),
searchPosition)
if bindPosition == -1:
break
left = updatedSQL[0:bindPosition]
right = updatedSQL[bindPosition + len(bindName) + 1:]
updatedSQL = left + "%s" + right
bindVarPositionList.sort(key=bindVarCompare)
mySQLBindVarsList = []
for origBind in origBindsList:
mySQLBindVars = []
for bindVarPosition in bindVarPositionList:
mySQLBindVars.append(origBind[bindVarPosition[0]])
mySQLBindVarsList.append(tuple(mySQLBindVars))
return (updatedSQL, mySQLBindVarsList)
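# Example of the rewrite performed above (a sketch with made-up values):
#   sql   = "SELECT id FROM wm_file WHERE lfn = :lfn AND run = :run"
#   binds = [{'lfn': '/store/a.root', 'run': 1}, {'lfn': '/store/b.root', 'run': 2}]
#   newsql, newbinds = self.substitute(sql, binds)
#   # newsql   -> "SELECT id FROM wm_file WHERE lfn = %s AND run = %s"
#   # newbinds -> [('/store/a.root', 1), ('/store/b.root', 2)]
# Tuple order follows the position of each :bind in the SQL, not the dict key order.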
def executebinds(self, s = None, b = None, connection = None,
returnCursor = False):
"""
_executebinds_
Execute a SQL statement that has a single set of bind variables.
Transform the bind variables into the format that MySQL expects.
"""
s, b = self.substitute(s, b)
return DBInterface.executebinds(self, s, b, connection, returnCursor)
def executemanybinds(self, s = None, b = None, connection = None,
returnCursor = False):
"""
_executemanybinds_
Execute a SQL statement that has multiple sets of bind variables.
Transform the bind variables into the format that MySQL expects.
"""
newsql, binds = self.substitute(s, b)
return DBInterface.executemanybinds(self, newsql, binds, connection,
returnCursor) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Database/MySQLCore.py | 0.637031 | 0.431105 | MySQLCore.py | pypi |
from __future__ import print_function
from builtins import str, bytes, int
from future.utils import viewitems
from Utils.PythonVersion import PY2
import sys
import types
class _EmptyClass(object):
pass
class JSONThunker(object):
"""
_JSONThunker_
Converts an arbitrary object to <-> from a jsonable object.
Will, for the most part "do the right thing" about various instance objects
by storing their class information along with their data in a dict. Handles
a recursion limit to prevent infinite recursion.
self.passThroughTypes - stores a list of types that should be passed
through unchanged to the JSON parser
self.blackListedModules - a list of modules that should not be stored in
the JSON.
"""
def __init__(self):
self.passThroughTypes = (type(None),
bool,
int,
float,
complex,
str,
bytes,
)
# objects that inherit from dict should be treated as a dict
# they don't store their data in __dict__. There was enough
of those classes that it warranted making a special case
self.dictSortOfObjects = (('WMCore.Datastructs.Job', 'Job'),
('WMCore.WMBS.Job', 'Job'),
('WMCore.Database.CMSCouch', 'Document'))
# ditto above, but for lists
self.listSortOfObjects = (('WMCore.DataStructs.JobPackage', 'JobPackage'),
('WMCore.WMBS.JobPackage', 'JobPackage'),)
self.foundIDs = {}
# modules we don't want JSONed
self.blackListedModules = ('sqlalchemy.engine.threadlocal',
'WMCore.Database.DBCore',
'logging',
'WMCore.DAOFactory',
'WMCore.WMFactory',
'WMFactory',
'WMCore.Configuration',
'WMCore.Database.Transaction',
'threading',
'datetime')
def checkRecursion(self, data):
"""
handles checking for infinite recursion
"""
if id(data) in self.foundIDs:
if self.foundIDs[id(data)] > 5:
self.unrecurse(data)
return "**RECURSION**"
else:
self.foundIDs[id(data)] += 1
return data
else:
self.foundIDs[id(data)] = 1
return data
def unrecurse(self, data):
"""
backs off the recursion counter if we're returning from _thunk
"""
try:
self.foundIDs[id(data)] -= 1
except:
print("Could not find count for id %s of type %s data %s" % (id(data), type(data), data))
raise
def checkBlackListed(self, data):
"""
checks to see if a given object is from a blacklisted module
"""
try:
# special case
if data.__class__.__module__ == 'WMCore.Database.CMSCouch' and data.__class__.__name__ == 'Document':
data.__class__ = type({})
return data
if data.__class__.__module__ in self.blackListedModules:
return "Blacklisted JSON object: module %s, name %s, str() %s" % \
(data.__class__.__module__, data.__class__.__name__, str(data))
else:
return data
except Exception:
return data
def thunk(self, toThunk):
"""
Thunk - turns an arbitrary object into a JSONable object
"""
self.foundIDs = {}
data = self._thunk(toThunk)
return data
def unthunk(self, data):
"""
unthunk - turns a previously 'thunked' object back into a python object
"""
return self._unthunk(data)
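# Minimal round-trip sketch (assumes the standard json module for the actual
# text serialisation; the payload is invented):
#   import json
#   thunker = JSONThunker()
#   payload = {'files': set(['a.root', 'b.root']), 'counts': {1: 10, 2: 20}}
#   text = json.dumps(thunker.thunk(payload))      # plain JSON text
#   restored = thunker.unthunk(json.loads(text))   # set and int keys come back,
#                                                  # given the key handling below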
def handleSetThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
tempDict = {'thunker_encoded_json': True, 'type': 'set'}
tempDict['set'] = self._thunk(list(toThunk))
self.unrecurse(toThunk)
return tempDict
def handleListThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
for k, v in enumerate(toThunk):
toThunk[k] = self._thunk(v)
self.unrecurse(toThunk)
return toThunk
def handleDictThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
special = False
tmpdict = {}
for k, v in viewitems(toThunk):
if isinstance(k, int):
special = True
tmpdict['_i:%s' % k] = self._thunk(v)
elif isinstance(k, float):
special = True
tmpdict['_f:%s' % k] = self._thunk(v)
else:
tmpdict[k] = self._thunk(v)
if special:
toThunk['thunker_encoded_json'] = self._thunk(True)
toThunk['type'] = self._thunk('dict')
toThunk['dict'] = tmpdict
else:
toThunk.update(tmpdict)
self.unrecurse(toThunk)
return toThunk
def handleObjectThunk(self, toThunk):
toThunk = self.checkRecursion(toThunk)
toThunk = self.checkBlackListed(toThunk)
if isinstance(toThunk, (str, bytes)):
# things that got blacklisted
return toThunk
if hasattr(toThunk, '__to_json__'):
# Use classes own json thunker
toThunk2 = toThunk.__to_json__(self)
self.unrecurse(toThunk)
return toThunk2
elif isinstance(toThunk, dict):
toThunk2 = self.handleDictObjectThunk(toThunk)
self.unrecurse(toThunk)
return toThunk2
elif isinstance(toThunk, list):
# a mother thunking list
toThunk2 = self.handleListObjectThunk(toThunk)
self.unrecurse(toThunk)
return toThunk2
else:
try:
thunktype = '%s.%s' % (toThunk.__class__.__module__,
toThunk.__class__.__name__)
tempDict = {'thunker_encoded_json': True, 'type': thunktype}
tempDict[thunktype] = self._thunk(toThunk.__dict__)
self.unrecurse(toThunk)
return tempDict
except Exception as e:
tempDict = {'json_thunk_exception_': "%s" % e}
self.unrecurse(toThunk)
return tempDict
def handleDictObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_dict': True,
'type': thunktype,
thunktype: {}}
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
for k, v in viewitems(data):
tempDict[thunktype][k] = self._thunk(v)
return tempDict
def handleDictObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_dict', False)
thunktype = data.pop('type', False)
for k, v in viewitems(data):
if k == thunktype:
for k2, v2 in viewitems(data[thunktype]):
value[k2] = self._unthunk(v2)
else:
value.__dict__[k] = self._unthunk(v)
return value
def handleListObjectThunk(self, data):
thunktype = '%s.%s' % (data.__class__.__module__,
data.__class__.__name__)
tempDict = {'thunker_encoded_json': True,
'is_list': True,
'type': thunktype,
thunktype: []}
for k, v in enumerate(data):
tempDict[thunktype].append(self._thunk(v))
for k, v in viewitems(data.__dict__):
tempDict[k] = self._thunk(v)
return tempDict
def handleListObjectUnThunk(self, value, data):
data.pop('thunker_encoded_json', False)
data.pop('is_list', False)
thunktype = data.pop('type')
for v in data[thunktype]:
value.append(self._unthunk(v))
for k, v in viewitems(data):
if k == thunktype:
continue
value.__dict__[k] = self._unthunk(v)
return value
def _thunk(self, toThunk):
"""
helper function for thunk, does the actual work
"""
if isinstance(toThunk, self.passThroughTypes):
return toThunk
elif type(toThunk) is list:
return self.handleListThunk(toThunk)
elif type(toThunk) is dict:
return self.handleDictThunk(toThunk)
elif type(toThunk) is set:
return self.handleSetThunk(toThunk)
elif type(toThunk) is types.FunctionType:
self.unrecurse(toThunk)
return "function reference"
elif isinstance(toThunk, object):
return self.handleObjectThunk(toThunk)
else:
self.unrecurse(toThunk)
raise RuntimeError(type(toThunk))
def _unthunk(self, jsondata):
"""
_unthunk - does the actual work for unthunk
"""
if PY2 and type(jsondata) is str:
return jsondata.encode("utf-8")
if type(jsondata) is dict:
if 'thunker_encoded_json' in jsondata:
# we've got a live one...
if jsondata['type'] == 'set':
newSet = set()
for i in self._unthunk(jsondata['set']):
newSet.add(self._unthunk(i))
return newSet
if jsondata['type'] == 'dict':
# We have a "special" dict
data = {}
for k, v in viewitems(jsondata['dict']):
tmp = self._unthunk(v)
if k.startswith('_i:'):
data[int(k.lstrip('_i:'))] = tmp
elif k.startswith('_f:'):
data[float(k.lstrip('_f:'))] = tmp
else:
data[k] = tmp
return data
else:
# spawn up an instance.. good luck
# here be monsters
# inspired from python's pickle code
ourClass = self.getThunkedClass(jsondata)
value = _EmptyClass()
if hasattr(ourClass, '__from_json__'):
# Use classes own json loader
try:
value.__class__ = ourClass
except Exception:
value = ourClass()
value = ourClass.__from_json__(value, jsondata, self)
elif 'thunker_encoded_json' in jsondata and 'is_dict' in jsondata:
try:
value.__class__ = ourClass
except Exception:
value = ourClass()
value = self.handleDictObjectUnThunk(value, jsondata)
elif 'thunker_encoded_json' in jsondata:
try:
value.__class__ = ourClass
except Exception:
value = ourClass()
value = self.handleListObjectUnThunk(value, jsondata)
else:
raise RuntimeError('Could not unthunk a class. Code to try was removed because it had errors.')
return value
else:
data = {}
for k, v in viewitems(jsondata):
data[k] = self._unthunk(v)
return data
else:
return jsondata
@staticmethod
def getThunkedClass(jsondata):
"""
Work out the class from it's thunked json representation
"""
module = jsondata['type'].rsplit('.', 1)[0]
name = jsondata['type'].rsplit('.', 1)[1]
if (module == 'WMCore.Services.Requests') and (name == 'JSONThunker'):
raise RuntimeError("Attempted to unthunk a JSONThunker..")
__import__(module)
mod = sys.modules[module]
ourClass = getattr(mod, name)
return ourClass | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Wrappers/JsonWrapper/JSONThunker.py | 0.443118 | 0.360208 | JSONThunker.py | pypi |
from builtins import next, str, object
from future.utils import viewitems
import xml.parsers.expat
class Node(object):
"""
_Node_
Really simple DOM like container to simplify parsing the XML file
and formatting the character data without all the whitespace guff
"""
def __init__(self, name, attrs):
self.name = str(name)
self.attrs = {}
self.text = None
for k, v in viewitems(attrs):
self.attrs.__setitem__(str(k), str(v))
self.children = []
def __str__(self):
result = " %s %s \"%s\"\n" % (self.name, self.attrs, self.text)
for child in self.children:
result += str(child)
return result
def coroutine(func):
"""
_coroutine_
Decorator method used to prime coroutines
"""
def start(*args,**kwargs):
cr = func(*args,**kwargs)
next(cr)
return cr
return start
def xmlFileToNode(reportFile):
"""
_xmlFileToNode_
Use expat and the build coroutine to parse the XML file and build
a node structure
"""
node = Node("JobReports", {})
expat_parse(open(reportFile, 'rb'),
build(node))
return node
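# Usage sketch (the report file name and its contents are hypothetical):
#   top = xmlFileToNode("FrameworkJobReport.xml")
#   for child in top.children:            # one Node per top-level XML element
#       print(child.name, child.attrs)
#       print(child)                      # __str__ recurses over the subtree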
def expat_parse(f, target):
"""
_expat_parse_
Expat based XML parsing that feeds a node building coroutine
"""
parser = xml.parsers.expat.ParserCreate()
#parser.buffer_size = 65536
parser.buffer_text = True
# a leftover from the py2py3 migration - TO BE REMOVED
# parser.returns_unicode = False
parser.StartElementHandler = \
lambda name,attrs: target.send(('start',(name,attrs)))
parser.EndElementHandler = \
lambda name: target.send(('end',name))
parser.CharacterDataHandler = \
lambda data: target.send(('text',data))
parser.ParseFile(f)
@coroutine
def build(topNode):
"""
_build_
Node structure builder that is fed from the expat_parse method
"""
nodeStack = [topNode]
charCache = []
while True:
event, value = (yield)
if event == "start":
charCache = []
newnode = Node(value[0], value[1])
nodeStack[-1].children.append(newnode)
nodeStack.append(newnode)
elif event == "text":
charCache.append(value)
else: # end
nodeStack[-1].text = str(''.join(charCache)).strip()
nodeStack.pop()
charCache = [] | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Algorithms/ParseXMLFile.py | 0.592431 | 0.276608 | ParseXMLFile.py | pypi |
from __future__ import print_function, division
from builtins import str, range
import math
import decimal
import logging
from WMCore.WMException import WMException
class MathAlgoException(WMException):
"""
Some simple math algo exceptions
"""
pass
def getAverageStdDev(numList):
"""
_getAverageStdDev_
Given a list, calculate both the average and the
standard deviation.
"""
if len(numList) == 0:
# Nothing to do here
return 0.0, 0.0
total = 0.0
average = 0.0
stdBase = 0.0
# Assemble the average
skipped = 0
for value in numList:
try:
if math.isnan(value) or math.isinf(value):
skipped += 1
continue
else:
total += value
except TypeError:
msg = "Attempted to take average of non-numerical values.\n"
msg += "Expected int or float, got %s: %s" % (value.__class__, value)
logging.error(msg)
logging.debug("FullList: %s", numList)
raise MathAlgoException(msg)
length = len(numList) - skipped
if length < 1:
return average, total
average = total / length
for value in numList:
tmpValue = value - average
stdBase += (tmpValue * tmpValue)
stdDev = math.sqrt(stdBase / length)
if math.isnan(average) or math.isinf(average):
average = 0.0
if math.isnan(stdDev) or math.isinf(stdDev) or not decimal.Decimal(str(stdDev)).is_finite():
stdDev = 0.0
if not isinstance(stdDev, (int, float)):
stdDev = 0.0
return average, stdDev
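# Quick numeric check of the helper above (no WMCore state involved):
#   getAverageStdDev([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
#   # -> (5.0, 2.0)  population standard deviation; NaN/inf entries are skipped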
def createHistogram(numList, nBins, limit):
"""
_createHistogram_
Create a histogram proxy (a list of bins) for a
given list of numbers
"""
average, stdDev = getAverageStdDev(numList = numList)
underflow = []
overflow = []
histEvents = []
histogram = []
for value in numList:
if math.fabs(average - value) <= limit * stdDev:
# Then we counted this event
histEvents.append(value)
elif average < value:
overflow.append(value)
elif average > value:
underflow.append(value)
if len(underflow) > 0:
binAvg, binStdDev = getAverageStdDev(numList=underflow)
histogram.append({'type': 'underflow',
'average': binAvg,
'stdDev': binStdDev,
'nEvents': len(underflow)})
if len(overflow) > 0:
binAvg, binStdDev = getAverageStdDev(numList=overflow)
histogram.append({'type': 'overflow',
'average': binAvg,
'stdDev': binStdDev,
'nEvents': len(overflow)})
if len(histEvents) < 1:
# Nothing to do?
return histogram
histEvents.sort()
upperBound = max(histEvents)
lowerBound = min(histEvents)
if lowerBound == upperBound:
# This is a problem
logging.debug("Only one value in the histogram!")
nBins = 1
upperBound = upperBound + 1
lowerBound = lowerBound - 1
binSize = (upperBound - lowerBound)/nBins
binSize = floorTruncate(binSize)
for x in range(nBins):
lowerEdge = floorTruncate(lowerBound + (x * binSize))
histogram.append({'type': 'standard',
'lowerEdge': lowerEdge,
'upperEdge': lowerEdge + binSize,
'average': 0.0,
'stdDev': 0.0,
'nEvents': 0})
for bin_ in histogram:
if bin_['type'] != 'standard':
continue
binList = []
for value in histEvents:
if value >= bin_['lowerEdge'] and value <= bin_['upperEdge']:
# Then we're in the bin
binList.append(value)
elif value > bin_['upperEdge']:
# Because this is a sorted list we are now out of the bin range
# Calculate our values and break
break
else:
continue
# If we get here, it's because we're out of values in the bin
# Time to do some math
if len(binList) < 1:
# Nothing to do here, leave defaults
continue
binAvg, binStdDev = getAverageStdDev(numList=binList)
bin_['average'] = binAvg
bin_['stdDev'] = binStdDev
bin_['nEvents'] = len(binList)
return histogram
def floorTruncate(value, precision=3):
"""
_floorTruncate_
Truncate a value to a set number of decimal points
Always truncates to a LOWER value, this is so that using it for
histogram binning creates values beneath the histogram lower edge.
"""
prec = math.pow(10, precision)
return math.floor(value * prec)/prec
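# e.g. floorTruncate(3.14159) -> 3.141 and floorTruncate(-1.2345, 2) -> -1.24;
# truncation always goes towards minus infinity, so a histogram's lower edge
# never ends up above the values it is meant to contain.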
def sortDictionaryListByKey(dictList, key, reverse=False):
"""
_sortDictionaryListByKey_
Given a list of dictionaries and a key with a numerical
value, sort that dictionary in order of that key's value.
NOTE: If the key does not exist, this will not raise an exception
This is because this is used for sorting of performance histograms
And not all histograms have the same value
"""
return sorted(dictList, key=lambda k: float(k.get(key, 0.0)), reverse=reverse)
def getLargestValues(dictList, key, n=1):
"""
_getLargestValues_
Take a list of dictionaries, sort them by the value of a
particular key, and return the n largest entries.
Key must be a numerical key.
"""
sortedList = sortDictionaryListByKey(dictList=dictList, key=key, reverse=True)
return sortedList[:n]
def validateNumericInput(value):
"""
_validateNumericInput_
Check that the value is actually a usable number
"""
try:
value = float(value)
if math.isnan(value) or math.isinf(value):
return False
except (TypeError, ValueError):
return False
return True
def calculateRunningAverageAndQValue(newPoint, n, oldM, oldQ):
"""
_calculateRunningAverageAndQValue_
Use the algorithm described in:
Donald E. Knuth (1998). The Art of Computer Programming, volume 2: Seminumerical Algorithms, 3rd ed.., p. 232. Boston: Addison-Wesley.
To calculate an average and standard deviation while getting data, the standard deviation
can be obtained from the so-called Q value with the following equation:
sigma = sqrt(Q/n)
This is also contained in the function calculateStdDevFromQ in this module. The average is equal to M.
"""
if not validateNumericInput(newPoint): raise MathAlgoException("Provided a non-valid newPoint")
if not validateNumericInput(n): raise MathAlgoException("Provided a non-valid n")
if n == 1:
M = newPoint
Q = 0.0
else:
if not validateNumericInput(oldM): raise MathAlgoException("Provided a non-valid oldM")
if not validateNumericInput(oldQ): raise MathAlgoException("Provided a non-valid oldQ")
M = oldM + (newPoint - oldM) / n
Q = oldQ + ((n - 1) * (newPoint - oldM) * (newPoint - oldM) / n)
return M, Q
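# Streaming example (a sketch): feed points one at a time, then derive sigma
# with calculateStdDevFromQ defined below.
#   points = [4.0, 8.0, 6.0]
#   M = Q = 0.0
#   for n, point in enumerate(points, start=1):
#       M, Q = calculateRunningAverageAndQValue(point, n, M, Q)
#   sigma = calculateStdDevFromQ(Q, len(points))  # matches getAverageStdDev(points)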
def calculateStdDevFromQ(Q, n):
"""
_calculateStdDevFromQ_
If Q is the sum of the squared differences of some points to their average,
then the standard deviation is given by:
sigma = sqrt(Q/n)
This function calculates that formula
"""
if not validateNumericInput(Q): raise MathAlgoException("Provided a non-valid Q")
if not validateNumericInput(n): raise MathAlgoException("Provided a non-valid n")
sigma = math.sqrt(Q / n)
if not validateNumericInput(sigma): return 0.0
return sigma | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/Algorithms/MathAlgos.py | 0.591841 | 0.33565 | MathAlgos.py | pypi |
from builtins import str as newstr
import random, cherrypy
class RESTError(Exception):
"""Base class for REST errors.
.. attribute:: http_code
Integer, HTTP status code for this error. Also emitted as X-Error-HTTP
header value.
.. attribute:: app_code
Integer, application error code, to be emitted as X-REST-Status header.
.. attribute:: message
String, information about the error, to be emitted as X-Error-Detail
header. Should not contain anything sensitive, and in particular should
never include any unvalidated or unsafe data, e.g. input parameters or
data from a database. Normally a fixed label with one-to-one match with
the :obj:`app-code`. If the text exceeds 200 characters, it's truncated.
Since this is emitted as a HTTP header, it cannot contain newlines or
anything encoding-dependent.
.. attribute:: info
String, additional information beyond :obj:`message`, to be emitted as
X-Error-Info header. Like :obj:`message` should not contain anything
sensitive or unsafe, or text inappropriate for a HTTP response header,
and should be short enough to fit in 200 characters. This is normally
free form text to clarify why the error happened.
.. attribute:: errid
String, random unique identifier for this error, to be emitted as
X-Error-ID header and output into server logs when logging the error.
The purpose is that clients save this id when they receive an error,
and further error reporting or debugging can use this value to identify
the specific error, and for example to grep logs for more information.
.. attribute:: errobj
If the problem was caused by another exception being raised in the code,
reference to the original exception object. For example if the code dies
with an :class:`KeyError`, this is the original exception object. This
error is logged to the server logs when reporting the error, but no
information about it is returned to the HTTP client.
.. attribute:: trace
The origin of the exception as returned by :func:`format_exc`. The full
trace is emitted to the server logs, each line prefixed with timestamp.
This information is not returned to the HTTP client.
"""
http_code = None
app_code = None
message = None
info = None
errid = None
errobj = None
trace = None
def __init__(self, info = None, errobj = None, trace = None):
self.errid = "%032x" % random.randrange(1 << 128)
self.errobj = errobj
self.info = info
self.trace = trace
def __str__(self):
return "%s %s [HTTP %d, APP %d, MSG %s, INFO %s, ERR %s]" \
% (self.__class__.__name__, self.errid, self.http_code, self.app_code,
repr(self.message).replace("\n", " ~~ "),
repr(self.info).replace("\n", " ~~ "),
repr(self.errobj).replace("\n", " ~~ "))
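# Hypothetical sketch of how server code is expected to use these classes
# (the handler, helper and dataset names are invented for illustration):
#   try:
#       rows = fetch_rows(dataset)
#   except KeyError as exc:
#       from traceback import format_exc
#       raise InvalidParameter("Unknown dataset", errobj=exc, trace=format_exc())
# str() of the resulting error then gives a one-line summary with its HTTP code,
# application code, message, info and the wrapped exception.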
class NotAcceptable(RESTError):
"Client did not specify format it accepts, or no compatible format was found."
http_code = 406
app_code = 201
message = "Not acceptable"
class UnsupportedMethod(RESTError):
"Client used HTTP request method which isn't supported for any API call."
http_code = 405
app_code = 202
message = "Request method not supported"
class MethodWithoutQueryString(RESTError):
"Client provided a query string which isn't acceptable for this request method."
http_code = 405
app_code = 203
message = "Query arguments not supported for this request method"
class APIMethodMismatch(RESTError):
"""Both the API and HTTP request methods are supported, but not in that
combination."""
http_code = 405
app_code = 204
message = "API not supported for this request method"
class APINotSpecified(RESTError):
"The request URL is missing API argument."
http_code = 400
app_code = 205
message = "API not specified"
class NoSuchInstance(RESTError):
"""The request URL is missing instance argument or the specified instance
does not exist."""
http_code = 404
app_code = 206
message = "No such instance"
class APINotSupported(RESTError):
"The request URL provides wrong API argument."
http_code = 404
app_code = 207
message = "API not supported"
class DataCacheEmpty(RESTError):
"The wmstats data cache has not been created."
http_code = 503
app_code = 208
message = "DataCache is Empty"
class DatabaseError(RESTError):
"""Parent class for database-related errors.
.. attribute:: lastsql
A tuple of *(sql, binds, kwbinds),* where `sql` is the last SQL statement
executed and `binds`, `kwbinds` are the bind values used with it. Any
sensitive parts like passwords have already been censored from the `sql`
string. Note that for massive requests `binds` or `kwbinds` can get large.
These are logged out in the server logs when reporting the error, but no
information about these are returned to the HTTP client.
.. attribute:: instance
String, the database instance for which the error occurred. This is
reported in the error message output to server logs, but no information
about this is returned to the HTTP client."""
lastsql = None
instance = None
def __init__(self, info = None, errobj = None, trace = None,
lastsql = None, instance = None):
RESTError.__init__(self, info, errobj, trace)
self.lastsql = lastsql
self.instance = instance
class DatabaseUnavailable(DatabaseError):
"""The instance argument is correct, but cannot connect to the database.
This error will only occur at initial attempt to connect to the database,
:class:`~.DatabaseConnectionError` is raised instead if the connection
ends prematurely after the transaction has already begun successfully."""
http_code = 503
app_code = 401
message = "Database unavailable"
class DatabaseConnectionError(DatabaseError):
"""Database was available when the operation started, but the connection
was lost or otherwise failed during the application operation."""
http_code = 504
app_code = 402
message = "Database connection failure"
class DatabaseExecutionError(DatabaseError):
"""Database operation failed."""
http_code = 500
app_code = 403
message = "Execution error"
class MissingParameter(RESTError):
"Client did not supply a parameter which is required."
http_code = 400
app_code = 301
message = "Missing required parameter"
class InvalidParameter(RESTError):
"Client supplied invalid value for a parameter."
http_code = 400
app_code = 302
message = "Invalid input parameter"
class MissingObject(RESTError):
"""An object required for the operation is missing. This might be a
pre-requisite needed to create a reference, or attempt to delete
an object which does not exist."""
http_code = 400
app_code = 303
message = "Required object is missing"
class TooManyObjects(RESTError):
"""Too many objects matched specified criteria. Usually this means
more than one object was matched, deleted, or inserted, when only
exactly one should have been subject to the operation."""
http_code = 400
app_code = 304
message = "Too many objects"
class ObjectAlreadyExists(RESTError):
"""An already existing object is on the way of the operation. This
is usually caused by uniqueness constraint violations when creating
new objects."""
http_code = 400
app_code = 305
message = "Object already exists"
class InvalidObject(RESTError):
"The specified object is invalid."
http_code = 400
app_code = 306
message = "Invalid object"
class ExecutionError(RESTError):
"""Input was in principle correct but there was an error processing
the request. This normally means either programming error, timeout, or
an unusual and unexpected problem with the database. For security reasons
little additional information is returned. If the problem persists, client
should contact service operators. The returned error id can be used as a
reference."""
http_code = 500
app_code = 403
message = "Execution error"
def report_error_header(header, val):
"""If `val` is non-empty, set CherryPy response `header` to `val`.
Replaces all newlines with "; " characters. If the resulting value is
longer than 200 characters, truncates it to the first 197 characters
and leaves a trailing ellipsis "..."."""
if val:
val = val.replace("\n", "; ")
if len(val) > 200: val = val[:197] + "..."
cherrypy.response.headers[header] = val
def report_rest_error(err, trace, throw):
"""Report a REST error: generate an appropriate log message, set the
response headers and raise an appropriate :class:`~.HTTPError`.
Normally `throw` would be True to translate the exception `err` into
a HTTP server error, but the function can also be called with `throw`
set to False if the purpose is merely to log an exception message.
:arg err: exception object.
:arg trace: stack trace to use in case `err` doesn't have one.
:arg throw: raise a :class:`~.HTTPError` if True."""
if isinstance(err, DatabaseError) and err.errobj:
offset = None
sql, binds, kwbinds = err.lastsql
if sql and err.errobj.args and hasattr(err.errobj.args[0], 'offset'):
offset = err.errobj.args[0].offset
sql = sql[:offset] + "<**>" + sql[offset:]
cherrypy.log("SERVER DATABASE ERROR %d/%d %s %s.%s %s [instance: %s] (%s);"
" last statement: %s; binds: %s, %s; offset: %s"
% (err.http_code, err.app_code, err.message,
getattr(err.errobj, "__module__", "__builtins__"),
err.errobj.__class__.__name__,
err.errid, err.instance, newstr(err.errobj).rstrip(),
sql, binds, kwbinds, offset))
for line in err.trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
cherrypy.response.headers["X-Error-ID"] = err.errid
report_error_header("X-Error-Detail", err.message)
report_error_header("X-Error-Info", err.info)
if throw: raise cherrypy.HTTPError(err.http_code, err.message)
elif isinstance(err, RESTError):
if err.errobj:
cherrypy.log("SERVER REST ERROR %s.%s %s (%s); derived from %s.%s (%s)"
% (err.__module__, err.__class__.__name__,
err.errid, err.message,
getattr(err.errobj, "__module__", "__builtins__"),
err.errobj.__class__.__name__,
newstr(err.errobj).rstrip()))
trace = err.trace
else:
cherrypy.log("SERVER REST ERROR %s.%s %s (%s)"
% (err.__module__, err.__class__.__name__,
err.errid, err.message))
for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = newstr(err.app_code)
cherrypy.response.headers["X-Error-HTTP"] = newstr(err.http_code)
cherrypy.response.headers["X-Error-ID"] = err.errid
report_error_header("X-Error-Detail", err.message)
report_error_header("X-Error-Info", err.info)
if throw: raise cherrypy.HTTPError(err.http_code, err.message)
elif isinstance(err, cherrypy.HTTPError):
errid = "%032x" % random.randrange(1 << 128)
cherrypy.log("SERVER HTTP ERROR %s.%s %s (%s)"
% (err.__module__, err.__class__.__name__,
errid, newstr(err).rstrip()))
for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = newstr(200)
cherrypy.response.headers["X-Error-HTTP"] = newstr(err.status)
cherrypy.response.headers["X-Error-ID"] = errid
report_error_header("X-Error-Detail", err._message)
if throw: raise err
else:
errid = "%032x" % random.randrange(1 << 128)
cherrypy.log("SERVER OTHER ERROR %s.%s %s (%s)"
% (getattr(err, "__module__", "__builtins__"),
err.__class__.__name__,
errid, newstr(err).rstrip()))
for line in trace.rstrip().split("\n"): cherrypy.log(" " + line)
cherrypy.response.headers["X-REST-Status"] = 400
cherrypy.response.headers["X-Error-HTTP"] = 500
cherrypy.response.headers["X-Error-ID"] = errid
report_error_header("X-Error-Detail", "Server error")
if throw: raise cherrypy.HTTPError(500, "Server error") | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/REST/Error.py | 0.835752 | 0.247783 | Error.py | pypi |
from builtins import str as newstr, bytes as newbytes
from WMCore.REST.Error import *
import math
import re
import numbers
from Utils.Utilities import decodeBytesToUnicodeConditional, encodeUnicodeToBytesConditional
from Utils.PythonVersion import PY3, PY2
def return_message(main_err, custom_err):
if custom_err:
return custom_err
return main_err
def _arglist(argname, kwargs):
val = kwargs.get(argname, None)
if val == None:
return []
elif not isinstance(val, list):
return [ val ]
else:
return val
def _check_rx(argname, val, custom_err = None):
if not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
return re.compile(val)
except:
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
def _check_str(argname, val, rx, custom_err = None):
"""
This does not really check that val is ASCII.
2021 09: we are now using version 17.4.0 -> we do not need to convert to
bytes here anymore, we are using a recent version of cherrypy.
We merged the functionality of _check_str and _check_ustr into a single function
:type val: str or bytes (only utf8 encoded string) in py3, unicode or str in py2
:type rx: regex, compiled from native str (unicode in py3, bytes in py2)
"""
val = decodeBytesToUnicodeConditional(val, condition=PY3)
val = encodeUnicodeToBytesConditional(val, condition=PY2)
# `val` should now be a "native str" (unicode in py3, bytes in py2)
# here str has not been redefined. it is default `str` in both py2 and py3.
if not isinstance(val, str) or not rx.match(val):
raise InvalidParameter(return_message("Incorrect '%s' parameter %s %s" % (argname, type(val), val), custom_err))
return val
def _check_num(argname, val, bare, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Integral) and (not isinstance(val, (newstr, newbytes)) or (bare and not val.isdigit())):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = int(val)
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _check_real(argname, val, special, minval, maxval, custom_err = None):
if not isinstance(val, numbers.Number) and not isinstance(val, (newstr, newbytes)):
raise InvalidParameter(return_message("Incorrect '%s' parameter" % argname, custom_err))
try:
n = float(val)
if not special and (math.isnan(n) or math.isinf(n)):
raise InvalidParameter(return_message("Parameter '%s' improper value" % argname, custom_err))
if (minval != None and n < minval) or (maxval != None and n > maxval):
raise InvalidParameter(return_message("Parameter '%s' value out of bounds" % argname, custom_err))
return n
except InvalidParameter:
raise
except:
raise InvalidParameter(return_message("Invalid '%s' parameter" % argname, custom_err))
def _validate_one(argname, param, safe, checker, optional, *args):
val = param.kwargs.get(argname, None)
if optional and val == None:
safe.kwargs[argname] = None
else:
safe.kwargs[argname] = checker(argname, val, *args)
del param.kwargs[argname]
def _validate_all(argname, param, safe, checker, *args):
safe.kwargs[argname] = [checker(argname, v, *args) for v in _arglist(argname, param.kwargs)]
if argname in param.kwargs:
del param.kwargs[argname]
def validate_rx(argname, param, safe, optional = False, custom_err = None):
"""Validates that an argument is a valid regexp.
Checks that an argument named `argname` exists in `param.kwargs`,
and it a string which compiles into a python regular expression.
If successful, the regexp object (not the string) is copied into
`safe.kwargs` and the string value is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_rx, optional, custom_err)
def validate_str(argname, param, safe, rx, optional = False, custom_err = None):
"""Validates that an argument is a string and matches a regexp.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string which matches regular expression `rx`. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
Accepts both unicode strings and utf8-encoded bytes strings as argument
string.
Accepts regex compiled only with "native strings", which means str in both
py2 and py3 (unicode in py3, bytes of utf8-encoded strings in py2)
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_ustr(argname, param, safe, rx, optional = False, custom_err = None):
"""Validates that an argument is a string and matches a regexp,
During the py2->py3 modernization, _check_str and _check_ustr have been
merged into a single function called _check_str.
This function is now the same as validate_str, but is kept nonetheless
not to break our client's code.
Checks that an argument named `argname` exists in `param.kwargs`
and it is a string which matches regular expression `rx`. If
successful the string is copied into `safe.kwargs` and the value
is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception."""
_validate_one(argname, param, safe, _check_str, optional, rx, custom_err)
def validate_num(argname, param, safe, optional = False,
bare = False, minval = None, maxval = None, custom_err = None):
"""Validates that an argument is a valid integer number.
Checks that an argument named `argname` exists in `param.kwargs`,
and it is an int or a string convertible to a valid number. If successful
the integer value (not the string) is copied into `safe.kwargs`
and the original int/string value is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception.
If `bare` is True, the number is required to be a pure digit sequence if it is a string.
Otherwise anything accepted by `int(val)` is acceted, including for
example leading white space or sign. Note that either way arbitrarily
large values are accepted; if you want to prevent abuse against big
integers, use the `minval` and `maxval` thresholds described below,
or check the length the of the string against some limit first.
If `minval` or `maxval` are given, values less than or greater than,
respectively, the threshold are rejected."""
_validate_one(argname, param, safe, _check_num, optional, bare, minval, maxval, custom_err)
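# Hedged usage sketch: the REST core passes RESTArgs-like holders exposing
# `args`/`kwargs`; any stand-in object with those attributes illustrates the flow.
#   class _Holder(object):
#       def __init__(self, **kw):
#           self.args, self.kwargs = [], kw
#   param, safe = _Holder(limit="25"), _Holder()
#   validate_num("limit", param, safe, bare=True, minval=1, maxval=1000)
#   # safe.kwargs["limit"] == 25 and "limit" has been removed from param.kwargs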
def validate_real(argname, param, safe, optional = False,
special = False, minval = None, maxval = None, custom_err = None):
"""Validates that an argument is a valid real number.
Checks that an argument named `argname` exists in `param.kwargs`,
and it is float number or a string convertible to a valid number. If successful
the float value (not the string) is copied into `safe.kwargs`
and the original float/string value is removed from `param.kwargs`.
If `optional` is True, the argument is not required to exist in
`param.kwargs`; None is then inserted into `safe.kwargs`. Otherwise
a missing value raises an exception.
Anything accepted by `float(val)` is accepted, including for example
leading white space, sign and exponent. However NaN and +/- Inf are
rejected unless `special` is True.
If `minval` or `maxval` are given, values less than or greater than,
respectively, the threshold are rejected."""
_validate_one(argname, param, safe, _check_real, optional, special, minval, maxval, custom_err)
def validate_rxlist(argname, param, safe, custom_err = None):
"""Validates that an argument is an array of strings, each of which
can be compiled into a python regexp object.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which compiles into a regular expression.
If successful the array is copied into `safe.kwargs` and the value is
removed from `param.kwargs`. The value always becomes an array in
`safe.kwargs`, even if no or only one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_rx, custom_err)
def validate_strlist(argname, param, safe, rx, custom_err = None):
"""Validates that an argument is an array of strings, each of which
matches a regexp.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which matches the regular expression
`rx`. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Use `validate_ustrlist` instead if the argument string might need
to be converted from utf-8 into unicode first. Use this method only
for inputs which are meant to be bare strings.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_ustrlist(argname, param, safe, rx, custom_err = None):
"""Validates that an argument is an array of strings, each of which
matches a regexp once converted from utf-8 into unicode.
Checks that an argument named `argname` is either a single string or
an array of strings, each of which matches the regular expression
`rx`. If successful the array is copied into `safe.kwargs` and the
value is removed from `param.kwargs`. The value always becomes an
array in `safe.kwargs`, even if no or only one argument was provided.
Use `validate_strlist` instead if the argument strings should always
be bare strings. This one automatically converts everything into
unicode and expects input exclusively in utf-8, which may not be
appropriate constraints for some uses.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_str, rx, custom_err)
def validate_numlist(argname, param, safe, bare=False, minval=None, maxval=None, custom_err = None):
"""Validates that an argument is an array of integers, as checked by
`validate_num()`.
Checks that an argument named `argname` is either a single string/int or
an array of strings/int, each of which validates with `validate_num` and
`bare`, `minval` and `maxval` arguments. If successful the array is
copied into `safe.kwargs` and the value is removed from `param.kwargs`.
The value always becomes an array in `kwsafe`, even if no or only one
argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_num, bare, minval, maxval, custom_err)
def validate_reallist(argname, param, safe, special=False, minval=None, maxval=None, custom_err = None):
"""Validates that an argument is an array of integers, as checked by
`validate_real()`.
Checks that an argument named `argname` is either a single string/float or
an array of strings/floats, each of which validates with `validate_real` and
`special`, `minval` and `maxval` arguments. If successful the array is
copied into `safe.kwargs` and the value is removed from `param.kwargs`.
The value always becomes an array in `safe.kwargs`, even if no or only
one argument was provided.
Note that an array of zero length is accepted, meaning there were no
`argname` parameters at all in `param.kwargs`."""
_validate_all(argname, param, safe, _check_real, special, minval, maxval, custom_err)
def validate_no_more_input(param):
"""Verifies no more input is left in `param.args` or `param.kwargs`."""
if param.args:
raise InvalidParameter("Excess path arguments, not validated args='%s'" % param.args)
if param.kwargs:
raise InvalidParameter("Excess keyword arguments, not validated kwargs='%s'" % param.kwargs)
def validate_lengths(safe, *names):
"""Verifies that all `names` exist in `safe.kwargs`, are lists, and
all the lists have the same length. This is convenience function for
checking that an API accepting multiple values receives equal number
of values for all of its parameters."""
refname = names[0]
if refname not in safe.kwargs or not isinstance(safe.kwargs[refname], list):
raise InvalidParameter("Incorrect '%s' parameter" % refname)
reflen = len(safe.kwargs[refname])
for other in names[1:]:
if other not in safe.kwargs or not isinstance(safe.kwargs[other], list):
raise InvalidParameter("Incorrect '%s' parameter" % other)
elif len(safe.kwargs[other]) != reflen:
raise InvalidParameter("Mismatched number of arguments: %d %s vs. %d %s"
% (reflen, refname, len(safe.kwargs[other]), other)) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/REST/Validation.py | 0.600774 | 0.233335 | Validation.py | pypi |
from __future__ import print_function
from builtins import str, bytes, object
from Utils.PythonVersion import PY3
from Utils.Utilities import encodeUnicodeToBytes, encodeUnicodeToBytesConditional
from future.utils import viewitems
import hashlib
import json
import xml.sax.saxutils
import zlib
from traceback import format_exc
import cherrypy
from WMCore.REST.Error import RESTError, ExecutionError, report_rest_error
try:
from cherrypy.lib import httputil
except ImportError:
from cherrypy.lib import http as httputil
def vary_by(header):
"""Add 'Vary' header for `header`."""
varies = cherrypy.response.headers.get('Vary', '')
varies = [x.strip() for x in varies.split(",") if x.strip()]
if header not in varies:
varies.append(header)
cherrypy.response.headers['Vary'] = ", ".join(varies)
def is_iterable(obj):
"""Check if `obj` is iterable."""
try:
iter(obj)
except TypeError:
return False
else:
return True
class RESTFormat(object):
def __call__(self, stream, etag):
"""Main entry point for generating output for `stream` using `etag`
object to generate ETag header. Returns a generator function for
producing a verbatim copy of each `stream` item, including any preambles
and trailers needed for the selected format. The intention is that
the caller will use the iterable to generate chunked HTTP transfer
encoding, or a simple result such as an image."""
# Make 'stream' iterable. We convert everything to chunks here.
# The final stream consumer will collapse small responses back
# to a single string. Convert files to 1MB chunks.
if stream is None:
stream = ['']
elif isinstance(stream, (str, bytes)):
stream = [stream]
elif hasattr(stream, "read"):
# types.FileType is not available anymore in python3,
# using it raises pylint W1624.
# Since cherrypy.lib.file_generator only uses the .read() attribute
# of a file, we simply check if stream.read() is present instead.
# https://github.com/cherrypy/cherrypy/blob/2a8aaccd649eb1011382c39f5cd93f76f980c0b1/cherrypy/lib/__init__.py#L64
stream = cherrypy.lib.file_generator(stream, 512 * 1024)
return self.stream_chunked(stream, etag, *self.chunk_args(stream))
def chunk_args(self, stream):
"""Return extra arguments needed for `stream_chunked()`. The default
return an empty tuple, so no extra arguments. Override in the derived
class if `stream_chunked()` needs preamble or trailer arguments."""
return tuple()
class XMLFormat(RESTFormat):
"""Format an iterable of objects into XML encoded in UTF-8.
Generates normally first a preamble, a stream of XML-rendered objects,
then the trailer, computing an ETag on the output string in the process.
This is designed exclusively for use with iterables for chunked transfer
encoding HTTP responses; it's not a general purpose formatting utility.
Outputs first a preamble, then XML encoded output of input stream, and
finally a trailer. Any exceptions raised by input stream are reported to
`report_rest_error` and swallowed, as this is normally used to generate
output for CherryPy responses, which cannot handle exceptions reasonably
after the output generation begins; later processing may reconvert those
back to exceptions however (cf. stream_maybe_etag()). Once the preamble
has been emitted, the trailer is also emitted even if the input stream
raises an exception, in order to make the output well-formed; the client
must inspect the X-REST-Status trailer header to find out if it got the
complete output. No ETag header is generated in case of an exception.
The ETag generation is deterministic only if iterating over input is
deterministic. Beware in particular the key order for a dict is
arbitrary and may differ for two semantically identical dicts.
A X-REST-Status trailer header is added only in case of error. There is
normally 'X-REST-Status: 100' in normal response headers, and it remains
valid in case of success.
The output is generated as an XML document whose top-level entity name
is defined by the label given at the formatter construction time. The
caller must define ``cherrypy.request.rest_generate_data`` to element
name for wrapping stream contents. Usually the top-level entity is the
application name and the ``cherrypy.request.rest_generate_data`` is
``result``.
Iterables are output as ``<array><i>ITEM</i><i>ITEM</i></array>``,
dictionaries as ``<dict><key>KEY</key><value>VALUE</value></dict>``.
`None` is output as empty contents, and hence there is no way to
distinguish `None` and an empty string from each other. Scalar types
are output as rendered by `str()`, but obviously XML encoding unsafe
characters. This class does not support formatting arbitrary types.
The formatter does not insert any spaces into the output. Although the
output is generated as a preamble, stream of objects, and trailer just
like by the `JSONFormatter`, each of which is a separate HTTP transfer
chunk, the output does *not* have guaranteed line-oriented structure
like the `JSONFormatter` produces. Note in particular that if the data
stream contains strings with newlines, the output will have arbitrary
line structure. On the other hand, as the output is well-formed XML,
virtually all SAX processors can read the stream incrementally even if
the client isn't able to fully preserve chunked HTTP transfer encoding."""
def __init__(self, label):
self.label = label
@staticmethod
def format_obj(obj):
"""Render an object `obj` into XML."""
if isinstance(obj, type(None)):
result = ""
elif isinstance(obj, str):
result = xml.sax.saxutils.escape(obj).encode("utf-8")
elif isinstance(obj, bytes):
result = xml.sax.saxutils.escape(obj)
elif isinstance(obj, (int, float, bool)):
result = xml.sax.saxutils.escape(str(obj)).encode("utf-8")
elif isinstance(obj, dict):
result = "<dict>"
for k, v in viewitems(obj):
result += "<key>%s</key><value>%s</value>" % \
(xml.sax.saxutils.escape(k).encode("utf-8"),
XMLFormat.format_obj(v))
result += "</dict>"
elif is_iterable(obj):
result = "<array>"
for v in obj:
result += "<i>%s</i>" % XMLFormat.format_obj(v)
result += "</array>"
else:
cherrypy.log("cannot represent object of type %s in xml (%s)"
% (type(obj).__name__, repr(obj)))
raise ExecutionError("cannot represent object in xml")
return result
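# For instance (a sketch; the exact text assumes the py2-era byte handling above),
# format_obj({'site': 'T1_XX', 'ids': [1, 2]}) is intended to yield, on one line:
#   <dict><key>site</key><value>T1_XX</value>
#   <key>ids</key><value><array><i>1</i><i>2</i></array></value></dict>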
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
try:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = XMLFormat.format_obj(obj)
etag.update(chunk)
yield chunk
except GeneratorExit:
etag.invalidate()
trailer = None
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
"""Return header and trailer needed to wrap `stream` as XML reply."""
preamble = "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n"
preamble += "<%s>" % self.label
if cherrypy.request.rest_generate_preamble:
desc = self.format_obj(cherrypy.request.rest_generate_preamble)
preamble += "<desc>%s</desc>" % desc
preamble += "<%s>" % cherrypy.request.rest_generate_data
trailer = "</%s></%s>" % (cherrypy.request.rest_generate_data, self.label)
return preamble, trailer
class JSONFormat(RESTFormat):
"""Format an iterable of objects into JSON.
Generates normally first a preamble, a stream of JSON-rendered objects,
then the trailer, computing an ETag on the output string in the process.
This is designed exclusively for use with iterables for chunked transfer
encoding HTTP responses; it's not a general purpose formatting utility.
Outputs first a preamble, then JSON encoded output of input stream, and
finally a trailer. Any exceptions raised by input stream are reported to
`report_rest_error` and swallowed, as this is normally used to generate
output for CherryPy responses, which cannot handle exceptions reasonably
after the output generation begins; later processing may reconvert those
back to exceptions however (cf. stream_maybe_etag()). Once the preamble
has been emitted, the trailer is also emitted even if the input stream
raises an exception, in order to make the output well-formed; the client
must inspect the X-REST-Status trailer header to find out if it got the
complete output. No ETag header is generated in case of an exception.
The ETag generation is deterministic only if `cjson.encode()` output is
deterministic for the input. Beware in particular the key order for a
dict is arbitrary and may differ for two semantically identical dicts.
A X-REST-Status trailer header is added only in case of error. There is
normally 'X-REST-Status: 100' in normal response headers, and it remains
valid in case of success.
The output is always generated as a JSON dictionary. The caller must
define ``cherrypy.request.rest_generate_data`` as the key for actual
contents, usually something like "result". The `stream` value will be
generated as an array value for that key.
If ``cherrypy.request.rest_generate_preamble`` is a non-empty list, it
is output as the ``desc`` key value in the preamble before outputting
the `stream` contents. Otherwise the output consists solely of `stream`.
A common use of ``rest_generate_preamble`` is list of column labels
with `stream` an iterable of lists of column values.
The output is guaranteed to contain one line of preamble which starts a
dictionary and an array ("``{key: [``"), one line of JSON rendering of
each object in `stream`, with the first line starting with exactly one
space and second and subsequent lines starting with a comma, and one
final trailer line consisting of "``]}``". Each line is generated as a
HTTP transfer chunk. This format is fixed so readers can be constructed
to read and parse the stream incrementally one line at a time,
facilitating maximum throughput processing of the response."""
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
comma = " "
try:
if preamble:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = comma + json.dumps(obj) + "\n"
etag.update(chunk)
yield chunk
comma = ","
except GeneratorExit:
etag.invalidate()
trailer = None
raise
except Exception as exp:
print("ERROR, json.dumps failed to serialize %s, type %s\nException: %s" \
% (obj, type(obj), str(exp)))
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
cherrypy.response.headers["X-REST-Status"] = 100
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
"""Return header and trailer needed to wrap `stream` as JSON reply."""
comma = ""
preamble = "{"
trailer = "]}\n"
if cherrypy.request.rest_generate_preamble:
desc = json.dumps(cherrypy.request.rest_generate_preamble)
preamble += '"desc": %s' % desc
comma = ", "
preamble += '%s"%s": [\n' % (comma, cherrypy.request.rest_generate_data)
return preamble, trailer
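# Illustrative sketch (added for clarity, not part of the original module):
# assuming cherrypy.request.rest_generate_data = "result" and no preamble list,
# chunk_args() yields preamble '{"result": [\n' and trailer ']}\n', while
# stream_chunked() emits one line per object, the first prefixed by a space and
# the rest by a comma; e.g. for the two hypothetical objects {"a": 1}, {"b": 2}:
#   {"result": [
#    {"a": 1}
#   ,{"b": 2}
#   ]}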
class PrettyJSONFormat(JSONFormat):
""" Format used for human, (web browser)"""
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
comma = " "
try:
if preamble:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = comma + json.dumps(obj, indent=2)
etag.update(chunk)
yield chunk
comma = ","
except GeneratorExit:
etag.invalidate()
trailer = None
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
cherrypy.response.headers["X-REST-Status"] = 100
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
class PrettyJSONHTMLFormat(PrettyJSONFormat):
""" Format used for human, (web browser) wrap around html tag on json"""
@staticmethod
def format_obj(obj):
"""Render an object `obj` into HTML."""
if isinstance(obj, type(None)):
result = ""
elif isinstance(obj, str):
obj = xml.sax.saxutils.quoteattr(obj)
result = "<pre>%s</pre>" % obj if '\n' in obj else obj
elif isinstance(obj, bytes):
obj = xml.sax.saxutils.quoteattr(str(obj, "utf-8"))
result = "<pre>%s</pre>" % obj if '\n' in obj else obj
elif isinstance(obj, (int, float, bool)):
result = "%s" % obj
elif isinstance(obj, dict):
result = "<ul>"
for k, v in viewitems(obj):
result += "<li><b>%s</b>: %s</li>" % (k, PrettyJSONHTMLFormat.format_obj(v))
result += "</ul>"
elif is_iterable(obj):
empty = True
result = "<details open><ul>"
for v in obj:
empty = False
result += "<li>%s</li>" % PrettyJSONHTMLFormat.format_obj(v)
result += "</ul></details>"
if empty:
result = ""
else:
cherrypy.log("cannot represent object of type %s in xml (%s)"
% (type(obj).__class__.__name__, repr(obj)))
raise ExecutionError("cannot represent object in xml")
return result
def stream_chunked(self, stream, etag, preamble, trailer):
"""Generator for actually producing the output."""
try:
etag.update(preamble)
yield preamble
try:
for obj in stream:
chunk = PrettyJSONHTMLFormat.format_obj(obj)
etag.update(chunk)
yield chunk
except GeneratorExit:
etag.invalidate()
trailer = None
raise
finally:
if trailer:
etag.update(trailer)
yield trailer
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
def chunk_args(self, stream):
"""Return header and trailer needed to wrap `stream` as XML reply."""
preamble = "<html><body>"
trailer = "</body></html>"
return preamble, trailer
class RawFormat(RESTFormat):
"""Format an iterable of objects as raw data.
Generates raw data completely unmodified, for example image data or
streaming arbitrary external data files including even plain text.
Computes an ETag on the output in the process. The result is always
chunked, even simple strings on input. Usually small enough responses
will automatically be converted back to a single string response post
compression and ETag processing.
Any exceptions raised by input stream are reported to `report_rest_error`
and swallowed, as this is normally used to generate output for CherryPy
responses, which cannot handle exceptions reasonably after the output
generation begins; later processing may reconvert those back to exceptions
however (cf. stream_maybe_etag()). An X-REST-Status trailer header is added
if (and only if) an exception occurs; the client must inspect that to find
out if it got the complete output. There is normally 'X-REST-Status: 100'
in normal response headers, and it remains valid in case of success.
No ETag header is generated in case of an exception."""
def stream_chunked(self, stream, etag):
"""Generator for actually producing the output."""
try:
for chunk in stream:
etag.update(chunk)
yield chunk
except RESTError as e:
etag.invalidate()
report_rest_error(e, format_exc(), False)
except Exception as e:
etag.invalidate()
report_rest_error(ExecutionError(), format_exc(), False)
except BaseException:
etag.invalidate()
raise
class DigestETag(object):
"""Compute hash digest over contents for ETag header."""
algorithm = None
def __init__(self, algorithm=None):
"""Prepare ETag computer."""
self.digest = hashlib.new(algorithm or self.algorithm)
def update(self, val):
"""Process response data `val`."""
if self.digest:
self.digest.update(encodeUnicodeToBytes(val))
def value(self):
"""Return ETag header value for current input."""
return self.digest and '"%s"' % self.digest.hexdigest()
def invalidate(self):
"""Invalidate the ETag calculator so value() will return None."""
self.digest = None
class MD5ETag(DigestETag):
"""Compute MD5 hash over contents for ETag header."""
algorithm = 'md5'
class SHA1ETag(DigestETag):
"""Compute SHA1 hash over contents for ETag header."""
algorithm = 'sha1'
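# Minimal usage sketch (added for clarity, not part of the original module);
# the formatters above feed every emitted chunk through one of these
# calculators:
#   etag = SHA1ETag()
#   etag.update("some chunk")
#   etag.value()       # -> '"<hex digest>"', suitable for the ETag header
#   etag.invalidate()  # after an error; value() then returns None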
def _stream_compress_identity(reply, *args):
"""Streaming compressor which returns original data unchanged."""
return reply
def _stream_compress_deflate(reply, compress_level, max_chunk):
"""Streaming compressor for the 'deflate' method. Generates output that
is guaranteed to expand at the exact same chunk boundaries as original
reply stream."""
# Create zlib compression object, with raw data stream (negative window size)
z = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
# Data pending compression. We only take entire chunks from original
# reply. Then process reply one chunk at a time. Whenever we have enough
# data to compress, spit it out flushing the zlib engine entirely, so we
# respect original chunk boundaries.
npending = 0
pending = []
for chunk in reply:
pending.append(chunk)
npending += len(chunk)
if npending >= max_chunk:
part = z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FULL_FLUSH)
pending = []
npending = 0
yield part
# Crank the compressor one more time for remaining output.
if npending:
yield z.compress(encodeUnicodeToBytes("".join(pending))) + z.flush(zlib.Z_FINISH)
#: Stream compression methods.
_stream_compressor = {
'identity': _stream_compress_identity,
'deflate': _stream_compress_deflate
}
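# Note (added for clarity, not part of the original module): the deflate
# compressor above produces a raw zlib stream (negative window bits) flushed
# at chunk boundaries, so a client reassembling the body would decompress it
# with a matching raw-stream decompressor, for example:
#   d = zlib.decompressobj(-zlib.MAX_WBITS)
#   plain = b"".join(d.decompress(chunk) for chunk in chunks) + d.flush()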
def stream_compress(reply, available, compress_level, max_chunk):
"""If compression has been requested via Accept-Encoding request header,
and is granted for this response via `available` compression methods,
convert the streaming `reply` into another streaming response which is
compressed at the exact chunk boundaries of the original response,
except that individual chunks may be coalesced up to `max_chunk` size.
The `compress_level` tells how hard to compress, zero disables the
compression entirely."""
global _stream_compressor
for enc in cherrypy.request.headers.elements('Accept-Encoding'):
if enc.value not in available:
continue
elif enc.value in _stream_compressor and compress_level > 0:
# Add 'Vary' header for 'Accept-Encoding'.
vary_by('Accept-Encoding')
# Compress contents at original chunk boundaries.
if 'Content-Length' in cherrypy.response.headers:
del cherrypy.response.headers['Content-Length']
cherrypy.response.headers['Content-Encoding'] = enc.value
return _stream_compressor[enc.value](reply, compress_level, max_chunk)
return reply
def _etag_match(status, etagval, match, nomatch):
"""Match ETag value against any If-Match / If-None-Match headers."""
# Execute conditions only for status 2xx. We only handle GET/HEAD
# requests here, it makes no sense to try to do this for PUT etc.
# as they need to be handled as request pre-condition, not in the
# streaming out part here.
if cherrypy.request.method in ('GET', 'HEAD'):
status, dummyReason, dummyMsg = httputil.valid_status(status)
if status >= 200 and status <= 299:
if match and ("*" in match or etagval in match):
raise cherrypy.HTTPError(412, "Precondition on ETag %s failed" % etagval)
if nomatch and ("*" in nomatch or etagval in nomatch):
raise cherrypy.HTTPRedirect([], 304)
def _etag_tail(head, tail, etag):
"""Generator which first returns anything in `head`, then `tail`.
Sets ETag header at the end to value of `etag` if it's defined and
yields a value."""
for chunk in head:
yield encodeUnicodeToBytes(chunk)
for chunk in tail:
yield encodeUnicodeToBytes(chunk)
etagval = (etag and etag.value())
if etagval:
cherrypy.response.headers["ETag"] = etagval
def stream_maybe_etag(size_limit, etag, reply):
"""Maybe generate ETag header for the response, and handle If-Match
and If-None-Match request headers. Consumes the reply until at most
`size_limit` bytes. If the response fits into that size, adds the
ETag header and matches it against any If-Match / If-None-Match
request headers and replies appropriately.
If the response is fully buffered, and the `reply` generator actually
results in an error and sets X-Error-HTTP / X-Error-Detail headers,
converts that error back into a real HTTP error response. Otherwise
responds with the fully buffered body directly, without generator
and chunking. In other words, responses smaller than `size_limit`
are always fully buffered and replied immediately without chunking.
If the response is not fully buffered, it's guaranteed to be output
at original chunk boundaries.
Note that if this function is fed the output from `stream_compress()`
as it normally would be, the `size_limit` constrains the compressed
size, and chunk boundaries correspond to compressed chunks."""
req = cherrypy.request
res = cherrypy.response
match = [str(x) for x in (req.headers.elements('If-Match') or [])]
nomatch = [str(x) for x in (req.headers.elements('If-None-Match') or [])]
# If ETag is already set, match conditions and output without buffering.
etagval = res.headers.get('ETag', None)
if etagval:
_etag_match(res.status or 200, etagval, match, nomatch)
res.headers['Trailer'] = 'X-REST-Status'
return _etag_tail([], reply, None)
# Buffer up to size_limit bytes internally. This internally builds up the
# ETag value inside 'etag'. In case of exceptions the ETag invalidates.
# If we exceed the limit, fall back to streaming without checking ETag
# against If-Match/If-None-Match. We'll still set the ETag in the trailer
# headers, so clients which understand trailers will get the value; most
# clients including browsers will ignore them.
size = 0
result = []
for chunk in reply:
result.append(chunk)
size += len(chunk)
if size > size_limit:
res.headers['Trailer'] = 'X-REST-Status'
return _etag_tail(result, reply, etag)
# We've buffered the entire response, but it may be an error reply. The
# generator code does not know if it's allowed to raise exceptions, so
# it swallows all errors and converts them into X-* headers. We recover
# the original HTTP response code and message from X-Error-{HTTP,Detail}
# headers, if any are present.
err = res.headers.get('X-Error-HTTP', None)
if err:
message = res.headers.get('X-Error-Detail', 'Original error lost')
raise cherrypy.HTTPError(int(err), message)
# OK, we buffered the entire reply and it's ok. Check ETag match criteria.
# The original stream generator must guarantee that if it fails it resets
# the 'etag' value, even if the error handlers above didn't run.
etagval = etag.value()
if etagval:
res.headers['ETag'] = etagval
_etag_match(res.status or 200, etagval, match, nomatch)
# OK, respond with the buffered reply as a plain string.
res.headers['Content-Length'] = size
# TODO investigate why `result` is a list of bytes strings in py3
# The current solution seems to work in both py2 and py3
resp = b"" if PY3 else ""
for item in result:
resp += encodeUnicodeToBytesConditional(item, condition=PY3)
assert len(resp) == size
return resp | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/REST/Format.py | 0.852997 | 0.222531 | Format.py | pypi |
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method = 'HNLogin',
login='testuser', name='Test User',
dn="/test/dn", roles={}, format="list"):
"""Create fake authentication and authorisation headers compatible
with the CMSWEB front-ends. Assumes you have the HMAC signing key
the back-end will use to validate the headers.
:arg str hmac_key: binary key data for signing headers.
:arg str method: authentication method, one of X509Cert, X509Proxy,
HNLogin, HostIP, AUCookie or None.
:arg str login: account login name.
:arg str name: account user name.
:arg str dn: account X509 subject.
:arg dict roles: role dictionary, each role with 'site' and 'group' lists.
:returns: list of header name, value tuples to add to a HTTP request."""
headers = { 'cms-auth-status': 'OK', 'cms-authn-method': method }
if login:
headers['cms-authn-login'] = login
if name:
headers['cms-authn-name'] = name
if dn:
headers['cms-authn-dn'] = dn
for name, role in viewitems(roles):
name = 'cms-authz-' + authz_canonical(name)
headers[name] = []
for r in 'site', 'group':
if r in role:
headers[name].extend(["%s:%s" % (r, authz_canonical(v)) for v in role[r]])
headers[name] = " ".join(headers[name])
prefix = suffix = ""
hkeys = list(headers)
for hk in sorted(hkeys):
if hk != 'cms-auth-status':
prefix += "h%xv%x" % (len(hk), len(headers[hk]))
suffix += "%s%s" % (hk, headers[hk])
msg = prefix + "#" + suffix
if PY3:
hmac_key = encodeUnicodeToBytes(hmac_key)
msg = encodeUnicodeToBytes(msg)
cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
headers['cms-authn-hmac'] = cksum
if format == "list":
return listitems(headers)
else:
return headers
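# Illustrative usage sketch (added for clarity, not part of the original
# module); the role/group values are hypothetical:
#   key = fake_authz_key_file()
#   headers = fake_authz_headers(key.data, roles={"admin": {"group": ["reqmgr"]}})
#   # 'headers' is a list of (name, value) tuples, including the signed
#   # 'cms-authn-hmac' entry, ready to be attached to a test HTTP request.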
def fake_authz_key_file(delete=True):
"""Create temporary file for fake authorisation hmac signing key.
:returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
attribute contains the HMAC signing binary key."""
t = NamedTemporaryFile(delete=delete)
with open("/dev/urandom", "rb") as fd:
t.data = fd.read(20)
t.write(t.data)
t.seek(0)
return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
"""Helper function to set up a :class:`~.RESTMain` server from given
module and class. Creates a fake server configuration and instantiates
the server application from it.
:arg str module_name: module from which to import test class.
:arg str class_name: name of the server test class.
:arg str app_name: optional test application name, 'test' by default.
:returns: tuple with the server object and authz hmac signing key."""
if authz_key_file:
test_authz_key = authz_key_file
else:
test_authz_key = fake_authz_key_file()
cfg = Configuration()
main = cfg.section_('main')
main.application = app_name or 'test'
main.silent = True
main.index = 'top'
main.authz_defaults = { 'role': None, 'group': None, 'site': None }
main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
app = cfg.section_(app_name or 'test')
app.admin = 'dada@example.org'
app.description = app.title = 'Test'
views = cfg.section_('views')
top = views.section_('top')
top.object = module_name + "." + class_name
server = RESTMain(cfg, os.getcwd())
server.validate_config()
server.setup_server()
server.install_application()
cherrypy.config.update({'server.socket_port': port})
cherrypy.config.update({'server.socket_host': '127.0.0.1'})
cherrypy.config.update({'request.show_tracebacks': True})
cherrypy.config.update({'environment': 'test_suite'})
for app in viewvalues(cherrypy.tree.apps):
if '/' in app.config:
app.config["/"]["request.show_tracebacks"] = True
return server, test_authz_key | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/REST/Test.py | 0.631935 | 0.193147 | Test.py | pypi |
from __future__ import division, print_function, absolute_import
from future import standard_library
standard_library.install_aliases()
# system modules
import json
import logging
import math
import re
import time
from urllib.parse import quote, unquote
# WMCore modules
from Utils.IteratorTools import grouper
from Utils.CertTools import ckey, cert
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
# DBS agregators
from dbs.apis.dbsClient import aggRuns, aggFileLumis
# static variables
STEP_PAT = re.compile(r'Step[0-9]')
TASK_PAT = re.compile(r'Task[0-9]')
def hasHTTPFailed(row):
"""
Evaluates whether the HTTP request through PyCurl failed or not.
:param row: dictionary data returned from pycurl_manager module
:return: a boolean confirming failure or not
"""
if 'data' not in row:
return True
if int(row.get('code', 200)) == 200:
return False
return True
def getMSLogger(verbose, logger=None):
"""
_getMSLogger_
Return a logger object using the standard WMCore formatter
:param verbose: boolean setting debug or not
:return: a logger object
"""
if logger:
return logger
verbose = logging.DEBUG if verbose else logging.INFO
logger = logging.getLogger()
logging.basicConfig(format="%(asctime)s:%(levelname)s:%(module)s: %(message)s",
level=verbose)
return logger
def dbsInfo(datasets, dbsUrl):
"Provides DBS info about dataset blocks"
datasetBlocks = {}
datasetSizes = {}
datasetTransfers = {}
if not datasets:
return datasetBlocks, datasetSizes, datasetTransfers
urls = ['%s/blocks?detail=True&dataset=%s' % (dbsUrl, d) for d in datasets]
logging.info("Executing %d requests against DBS 'blocks' API, with details", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if hasHTTPFailed(row):
print("FAILURE: dbsInfo for %s. Error: %s %s" % (dataset, row.get('code'), row.get('error')))
continue
rows = json.loads(row['data'])
blocks = []
size = 0
datasetTransfers.setdefault(dataset, {}) # flat dict in the format of blockName: blockSize
for item in rows:
blocks.append(item['block_name'])
size += item['block_size']
datasetTransfers[dataset].update({item['block_name']: item['block_size']})
datasetBlocks[dataset] = blocks
datasetSizes[dataset] = size
return datasetBlocks, datasetSizes, datasetTransfers
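# Shape of the structures returned above (added for clarity, values are
# hypothetical):
#   datasetBlocks    = {"/Prim/Proc/TIER": ["/Prim/Proc/TIER#block1", ...]}
#   datasetSizes     = {"/Prim/Proc/TIER": 123456789}
#   datasetTransfers = {"/Prim/Proc/TIER": {"/Prim/Proc/TIER#block1": 1234, ...}}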
def getPileupDatasetSizes(datasets, phedexUrl):
"""
Given a list of datasets, find all their blocks with replicas
available, i.e., blocks that have valid files to be processed,
and calculate the total dataset size
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:return: a dictionary of datasets and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
sizeByDset = {}
if not datasets:
return sizeByDset
urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if row['data'] is None:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
sizeByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
sizeByDset.setdefault(dataset, 0)
try:
for item in rows['phedex']['block']:
sizeByDset[dataset] += item['bytes']
except Exception as exc:
print("Failure in getPileupDatasetSizes for dataset %s. Error: %s" % (dataset, str(exc)))
sizeByDset[dataset] = None
return sizeByDset
def getBlockReplicasAndSize(datasets, phedexUrl, group=None):
"""
Given a list of datasets, find all their blocks with replicas
available (thus blocks with at least 1 valid file), completed
and subscribed.
If PhEDEx group is provided, make sure it's subscribed under that
same group.
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:param group: optional PhEDEx group name
:return: a dictionary in the form of:
{"dataset":
{"block":
{"blockSize": 111, "locations": ["x", "y"]}
}
}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
dsetBlockSize = {}
if not datasets:
return dsetBlockSize
urls = ['%s/blockreplicas?dataset=%s' % (phedexUrl, dset) for dset in datasets]
logging.info("Executing %d requests against PhEDEx 'blockreplicas' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if row['data'] is None:
print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
dsetBlockSize.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
dsetBlockSize.setdefault(dataset, {})
try:
for item in rows['phedex']['block']:
block = {item['name']: {'blockSize': item['bytes'], 'locations': []}}
for repli in item['replica']:
if repli['complete'] == 'y' and repli['subscribed'] == 'y':
if not group:
block[item['name']]['locations'].append(repli['node'])
elif repli['group'] == group:
block[item['name']]['locations'].append(repli['node'])
dsetBlockSize[dataset].update(block)
except Exception as exc:
print("Failure in getBlockReplicasAndSize for dataset %s. Error: %s" % (dataset, str(exc)))
dsetBlockSize[dataset] = None
return dsetBlockSize
def getPileupSubscriptions(datasets, phedexUrl, group=None, percentMin=99):
"""
Provided a list of datasets, find dataset level subscriptions where it's
as complete as `percentMin`.
:param datasets: list of dataset names
:param phedexUrl: a string with the PhEDEx URL
:param group: optional string with the PhEDEx group
:param percentMin: only return subscriptions that are this complete
:return: a dictionary of datasets and a list of their location.
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
locationByDset = {}
if not datasets:
return locationByDset
if group:
url = "%s/subscriptions?group=%s" % (phedexUrl, group)
url += "&percent_min=%s&dataset=%s"
else:
url = "%s/subscriptions?" % phedexUrl
url += "percent_min=%s&dataset=%s"
urls = [url % (percentMin, dset) for dset in datasets]
logging.info("Executing %d requests against PhEDEx 'subscriptions' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].rsplit('=')[-1]
if row['data'] is None:
print("Failure in getPileupSubscriptions for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
locationByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
locationByDset.setdefault(dataset, [])
try:
for item in rows['phedex']['dataset']:
for subs in item['subscription']:
locationByDset[dataset].append(subs['node'])
except Exception as exc:
print("Failure in getPileupSubscriptions for dataset %s. Error: %s" % (dataset, str(exc)))
locationByDset[dataset] = None
return locationByDset
def getBlocksByDsetAndRun(datasetName, runList, dbsUrl):
"""
Given a dataset name and a list of runs, find all the blocks
:return: flat list of blocks
"""
blocks = set()
if isinstance(runList, set):
runList = list(runList)
urls = []
for runSlice in grouper(runList, 50):
urls.append('%s/blocks?run_num=%s&dataset=%s' % (dbsUrl, str(runSlice).replace(" ", ""), datasetName))
logging.info("Executing %d requests against DBS 'blocks' API, with run_num list", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].rsplit('=')[-1]
if hasHTTPFailed(row):
msg = "Failure in getBlocksByDsetAndRun for %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error'))
raise RuntimeError(msg)
rows = json.loads(row['data'])
for item in rows:
blocks.add(item['block_name'])
return list(blocks)
def getFileLumisInBlock(blocks, dbsUrl, validFileOnly=1):
"""
Given a list of blocks, find their file run lumi information
in DBS for up to 10 blocks concurrently
:param blocks: list of block names
:param dbsUrl: string with the DBS URL
:param validFileOnly: integer flag for valid files only or not
:return: a dict of blocks with list of file/run/lumi info
"""
runLumisByBlock = {}
urls = ['%s/filelumis?validFileOnly=%d&block_name=%s' % (dbsUrl, validFileOnly, quote(b)) for b in blocks]
# limit it to 10 concurrent calls not to overload DBS
logging.info("Executing %d requests against DBS 'filelumis' API, concurrency limited to 10", len(urls))
data = multi_getdata(urls, ckey(), cert(), num_conn=10)
for row in data:
blockName = unquote(row['url'].rsplit('=')[-1])
if hasHTTPFailed(row):
msg = "Failure in getFileLumisInBlock for block %s. Error: %s %s" % (blockName,
row.get('code'),
row.get('error'))
raise RuntimeError(msg)
rows = json.loads(row['data'])
rows = aggFileLumis(rows) # adjust to DBS Go server output
runLumisByBlock.setdefault(blockName, [])
for item in rows:
runLumisByBlock[blockName].append(item)
return runLumisByBlock
def findBlockParents(blocks, dbsUrl):
"""
Helper function to find block parents given a list of block names.
Return a dictionary in the format of:
{"child dataset name": {"child block": ["parent blocks"],
"child block": ["parent blocks"], ...}}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
parentsByBlock = {}
urls = ['%s/blockparents?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
logging.info("Executing %d requests against DBS 'blockparents' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
blockName = unquote(row['url'].rsplit('=')[-1])
dataset = blockName.split("#")[0]
if hasHTTPFailed(row):
print("Failure in findBlockParents for block %s. Error: %s %s" % (blockName,
row.get('code'),
row.get('error')))
parentsByBlock.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
try:
if dataset in parentsByBlock and parentsByBlock[dataset] is None:
# then one of the block calls has failed, keep it failed!
continue
parentsByBlock.setdefault(dataset, {})
for item in rows:
parentsByBlock[dataset].setdefault(item['this_block_name'], set())
parentsByBlock[dataset][item['this_block_name']].add(item['parent_block_name'])
except Exception as exc:
print("Failure in findBlockParents for block %s. Error: %s" % (blockName, str(exc)))
parentsByBlock[dataset] = None
return parentsByBlock
def getRunsInBlock(blocks, dbsUrl):
"""
Provided a list of block names, find their run numbers
:param blocks: list of block names
:param dbsUrl: string with the DBS URL
:return: a dictionary of block names and a list of run numbers
"""
runsByBlock = {}
urls = ['%s/runs?block_name=%s' % (dbsUrl, quote(b)) for b in blocks]
logging.info("Executing %d requests against DBS 'runs' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
blockName = unquote(row['url'].rsplit('=')[-1])
if hasHTTPFailed(row):
msg = "Failure in getRunsInBlock for block %s. Error: %s %s" % (blockName,
row.get('code'),
row.get('error'))
raise RuntimeError(msg)
rows = json.loads(row['data'])
rows = aggRuns(rows) # adjust to DBS Go server output
runsByBlock[blockName] = rows[0]['run_num']
return runsByBlock
def getWorkflow(requestName, reqMgrUrl):
"Get list of workflow info from ReqMgr2 data-service for given request name"
headers = {'Accept': 'application/json'}
params = {}
url = '%s/data/request/%s' % (reqMgrUrl, requestName)
mgr = RequestHandler()
res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
data = json.loads(res)
return data.get('result', [])
def getDetoxQuota(url):
"Get list of workflow info from ReqMgr2 data-service for given request name"
headers = {}
params = {}
mgr = RequestHandler()
res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
res = res.split('\n')
return res
def eventsLumisInfo(inputs, dbsUrl, validFileOnly=0, sumOverLumi=0):
"Get information about events and lumis for given set of inputs: blocks or datasets"
what = 'dataset'
eventsLumis = {}
if not inputs:
return eventsLumis
if '#' in inputs[0]: # inputs are list of blocks
what = 'block_name'
urls = ['%s/filesummaries?validFileOnly=%s&sumOverLumi=%s&%s=%s'
% (dbsUrl, validFileOnly, sumOverLumi, what, quote(i)) for i in inputs]
data = multi_getdata(urls, ckey(), cert())
for row in data:
data = unquote(row['url'].split('=')[-1])
if hasHTTPFailed(row):
print("FAILURE: eventsLumisInfo for %s. Error: %s %s" % (data,
row.get('code'),
row.get('error')))
continue
rows = json.loads(row['data'])
for item in rows:
eventsLumis[data] = item
return eventsLumis
def getEventsLumis(dataset, dbsUrl, blocks=None, eventsLumis=None):
"Helper function to return number of events/lumis for given dataset or blocks"
nevts = nlumis = 0
if blocks:
missingBlocks = [b for b in blocks if b not in eventsLumis]
if missingBlocks:
eLumis = eventsLumisInfo(missingBlocks, dbsUrl)
eventsLumis.update(eLumis)
for block in blocks:
data = eventsLumis[block]
nevts += data['num_event']
nlumis += data['num_lumi']
return nevts, nlumis
if eventsLumis and dataset in eventsLumis:
data = eventsLumis[dataset]
return data['num_event'], data['num_lumi']
eLumis = eventsLumisInfo([dataset], dbsUrl)
data = eLumis.get(dataset, {'num_event': 0, 'num_lumi': 0})
return data['num_event'], data['num_lumi']
def getComputingTime(workflow, eventsLumis=None, unit='h', dbsUrl=None, logger=None):
"Return computing time per give workflow"
logger = getMSLogger(verbose=True, logger=logger)
cput = None
if 'InputDataset' in workflow:
dataset = workflow['InputDataset']
if 'BlockWhitelist' in workflow and workflow['BlockWhitelist']:
nevts, _ = getEventsLumis(dataset, dbsUrl, workflow['BlockWhitelist'], eventsLumis)
else:
nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
tpe = workflow['TimePerEvent']
cput = nevts * tpe
elif 'Chain' in workflow['RequestType']:
base = workflow['RequestType'].replace('Chain', '')
itask = 1
cput = 0
carryOn = {}
while True:
t = '%s%d' % (base, itask)
itask += 1
if t in workflow:
task = workflow[t]
if 'InputDataset' in task:
dataset = task['InputDataset']
if 'BlockWhitelist' in task and task['BlockWhitelist']:
nevts, _ = getEventsLumis(dataset, dbsUrl, task['BlockWhitelist'], eventsLumis)
else:
nevts, _ = getEventsLumis(dataset, dbsUrl, eventsLumis=eventsLumis)
elif 'Input%s' % base in task:
nevts = carryOn[task['Input%s' % base]]
elif 'RequestNumEvents' in task:
nevts = float(task['RequestNumEvents'])
else:
logger.debug("this is not supported, making it zero cput")
nevts = 0
tpe = task.get('TimePerEvent', 1)
carryOn[task['%sName' % base]] = nevts
if 'FilterEfficiency' in task:
carryOn[task['%sName' % base]] *= task['FilterEfficiency']
cput += tpe * nevts
else:
break
else:
nevts = float(workflow.get('RequestNumEvents', 0))
feff = float(workflow.get('FilterEfficiency', 1))
tpe = workflow.get('TimePerEvent', 1)
cput = nevts / feff * tpe
if cput is None:
return 0
if unit == 'm':
cput = cput / (60.)
if unit == 'h':
cput = cput / (60. * 60.)
if unit == 'd':
cput = cput / (60. * 60. * 24.)
return cput
def sigmoid(x):
"Sigmoid function"
return 1. / (1 + math.exp(-x))
def getNCopies(cpuHours, minN=2, maxN=3, weight=50000, constant=100000):
"Calculate number of copies for given workflow"
func = sigmoid(-constant / weight)
fact = (maxN - minN) / (1 - func)
base = (func * maxN - minN) / (func - 1)
return int(base + fact * sigmoid((cpuHours - constant) / weight))
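# Note (added for clarity): `base` and `fact` are chosen so that the curve
# base + fact * sigmoid((cpuHours - constant) / weight) evaluates to minN at
# cpuHours == 0 (where the sigmoid equals sigmoid(-constant / weight)) and
# tends to maxN as cpuHours grows far beyond `constant`; `weight` controls how
# quickly the transition between the two copy counts happens.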
def teraBytes(size):
"Return size in TB (Terabytes)"
return size / (1000 ** 4)
def gigaBytes(size):
"Return size in GB (Gigabytes), rounded to 2 digits"
return round(size / (1000 ** 3), 2)
def elapsedTime(time0, msg='Elapsed time', ndigits=1):
"Helper function to return elapsed time message"
msg = "%s: %s sec" % (msg, round(time.time() - time0, ndigits))
return msg
def getRequest(url, params):
"Helper function to GET data from given URL"
mgr = RequestHandler()
headers = {'Accept': 'application/json'}
verbose = 0
if 'verbose' in params:
verbose = params['verbose']
del params['verbose']
data = mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(), verbose=verbose)
return data
def postRequest(url, params):
"Helper function to POST request to given URL"
mgr = RequestHandler()
headers = {'Accept': 'application/json'}
verbose = 0
if 'verbose' in params:
verbose = params['verbose']
del params['verbose']
data = mgr.getdata(url, params, headers, ckey=ckey(), cert=cert(),
verb='POST', verbose=verbose)
return data
def getIO(request, dbsUrl):
"Get input/output info about given request"
lhe = False
primary = set()
parent = set()
secondary = set()
if 'Chain' in request['RequestType']:
base = request['RequestType'].replace('Chain', '')
item = 1
while '%s%d' % (base, item) in request:
alhe, aprimary, aparent, asecondary = \
ioForTask(request['%s%d' % (base, item)], dbsUrl)
if alhe:
lhe = True
primary.update(aprimary)
parent.update(aparent)
secondary.update(asecondary)
item += 1
else:
lhe, primary, parent, secondary = ioForTask(request, dbsUrl)
return lhe, primary, parent, secondary
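# Return shape note (added for clarity):
#   lhe       -> True if any task declares LheInputFiles
#   primary   -> set of input dataset names
#   parent    -> set of parent dataset names (when IncludeParent is set)
#   secondary -> set of pileup dataset names (MCPileup)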
def ioForTask(request, dbsUrl):
"Return lfn, primary, parent and secondary datasets for given request"
lhe = False
primary = set()
parent = set()
secondary = set()
if 'InputDataset' in request:
datasets = request['InputDataset']
datasets = datasets if isinstance(datasets, list) else [datasets]
primary = set([r for r in datasets if r])
if primary and 'IncludeParent' in request and request['IncludeParent']:
parent = findParent(primary, dbsUrl)
if 'MCPileup' in request:
pileups = request['MCPileup']
pileups = pileups if isinstance(pileups, list) else [pileups]
secondary = set([r for r in pileups if r])
if 'LheInputFiles' in request and request['LheInputFiles'] in ['True', True]:
lhe = True
return lhe, primary, parent, secondary
def findParent(datasets, dbsUrl):
"""
Helper function to find the parent dataset.
It returns a dictionary key'ed by the child dataset
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
parentByDset = {}
if not datasets:
return parentByDset
urls = ['%s/datasetparents?dataset=%s' % (dbsUrl, d) for d in datasets]
logging.info("Executing %d requests against DBS 'datasetparents' API", len(urls))
data = multi_getdata(urls, ckey(), cert())
for row in data:
dataset = row['url'].split('=')[-1]
if hasHTTPFailed(row):
print("Failure in findParent for dataset %s. Error: %s %s" % (dataset,
row.get('code'),
row.get('error')))
parentByDset.setdefault(dataset, None)
continue
rows = json.loads(row['data'])
try:
for item in rows:
parentByDset[item['this_dataset']] = item['parent_dataset']
except Exception as exc:
print("Failure in findParent for dataset %s. Error: %s" % (dataset, str(exc)))
parentByDset[dataset] = None
return parentByDset | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/Tools/Common.py | 0.682256 | 0.23118 | Common.py | pypi |
from __future__ import print_function, division, absolute_import
from builtins import str
from future.utils import viewitems
from future import standard_library
standard_library.install_aliases()
import datetime
import json
import logging
import re
from urllib.parse import quote, unquote
from Utils.CertTools import cert, ckey
from WMCore.Services.pycurl_manager import RequestHandler
from WMCore.Services.pycurl_manager import getdata as multi_getdata
### Amount of days that we wait for stuck rules to be sorted
### After that, the rule is not considered and a new rule is created
STUCK_LIMIT = 7 # 7 days
def parseNewLineJson(stream):
"""
Parse newline delimited json streaming data
"""
for line in stream.split("\n"):
if line:
yield json.loads(line)
def stringDateToEpoch(strDate):
"""
Given a date/time in the format of:
'Thu, 29 Apr 2021 13:15:42 UTC'
it returns an integer with the equivalent EPOCH time
:param strDate: a string with the date and time
:return: the equivalent EPOCH time (integer)
"""
timestamp = datetime.datetime.strptime(strDate, "%a, %d %b %Y %H:%M:%S %Z")
return int(timestamp.strftime('%s'))
def getRucioToken(rucioAuthUrl, rucioAcct):
"""
Provided a Rucio account, fetch a token from the authentication server
:param rucioAuthUrl: url to the rucio authentication server
:param rucioAcct: rucio account to be used
:return: an integer with the expiration time in EPOCH
"""
params = {}
headers = {"X-Rucio-Account": rucioAcct}
url = '%s/auth/x509' % rucioAuthUrl
logging.info("Requesting a token to Rucio for account: %s, against url: %s", rucioAcct, rucioAuthUrl)
mgr = RequestHandler()
res = mgr.getheader(url, params=params, headers=headers, ckey=ckey(), cert=cert())
if res.getReason() == "OK":
userToken = res.getHeaderKey('X-Rucio-Auth-Token')
tokenExpiration = res.getHeaderKey('X-Rucio-Auth-Token-Expires')
logging.info("Retrieved Rucio token valid until: %s", tokenExpiration)
# convert the human readable expiration time to EPOCH time
tokenExpiration = stringDateToEpoch(tokenExpiration)
return userToken, tokenExpiration
raise RuntimeError("Failed to acquire a Rucio token. Error: {}".format(res.getReason()))
def renewRucioToken(rucioAuthUrl, userToken):
"""
Provided a user Rucio token, check its lifetime and extend it by another hour
:param rucioAuthUrl: url to the rucio authentication server
:param userToken: the user Rucio token to be renewed
:return: the new token lifetime as reported by the Rucio authentication server
"""
params = {}
headers = {"X-Rucio-Auth-Token": userToken}
url = '%s/auth/validate' % rucioAuthUrl
logging.info("Renewing the Rucio token...")
mgr = RequestHandler()
res = mgr.getdata(url, params=params, headers=headers, ckey=ckey(), cert=cert())
try:
newExpiration = eval(res)['lifetime']
except Exception as exc:
raise RuntimeError("Failed to renew Rucio token. Response: {} Error: {}".format(res, str(exc)))
return newExpiration
def getPileupContainerSizesRucio(containers, rucioUrl, rucioToken, scope="cms"):
"""
Given a list of containers, find their total size in Rucio
:param containers: list of container names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a flat dictionary of container and their respective sizes
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
NOTE: Rucio version of getPileupDatasetSizes()
"""
sizeByDset = {}
if not containers:
return sizeByDset
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = ['{}/dids/{}/{}?dynamic=anything'.format(rucioUrl, scope, cont) for cont in containers]
logging.info("Executing %d requests against Rucio for the container size", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = row['url'].split('/dids/{}/'.format(scope))[1]
container = container.replace("?dynamic=anything", "")
if row['data'] is None:
msg = "Failure in getPileupContainerSizesRucio for container {}. Response: {}".format(container, row)
logging.error(msg)
sizeByDset.setdefault(container, None)
continue
response = json.loads(row['data'])
try:
sizeByDset.setdefault(container, response['bytes'])
except KeyError:
msg = "getPileupContainerSizesRucio function did not return a valid response for container: %s. Error: %s"
logging.error(msg, container, response)
sizeByDset.setdefault(container, None)
continue
return sizeByDset
def listReplicationRules(containers, rucioAccount, grouping,
rucioUrl, rucioToken, scope="cms"):
"""
List all the replication rules for the input filters provided.
It builds a dictionary of container name and the locations where
they have a rule locking data on, with some additional rule state
logic in the code.
:param containers: list of container names
:param rucioAccount: string with the rucio account
:param grouping: rule grouping string, only "A" or "D" are allowed
:param rucioUrl: string with the Rucio url
:param rucioToken: string with the Rucio token
:param scope: string with the data scope
:return: a flat dictionary key'ed by the container name, with a list of RSE
expressions that still need to be resolved
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
NOTE-2: Available rule states can be found at:
https://github.com/rucio/rucio/blob/16f39dffa1608caa0a1af8bbc0fcff2965dccc50/lib/rucio/db/sqla/constants.py#L180
"""
locationByContainer = {}
if not containers:
return locationByContainer
if grouping not in ["A", "D"]:
raise RuntimeError("Replication rule grouping value provided ({}) is not allowed!".format(grouping))
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = []
for cont in containers:
urls.append('{}/rules/?scope={}&account={}&grouping={}&name={}'.format(rucioUrl, scope, rucioAccount,
grouping, quote(cont, safe="")))
logging.info("Executing %d requests against Rucio to list replication rules", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = unquote(row['url'].split("name=")[1])
if "200 OK" not in row['headers']:
msg = "Failure in listReplicationRules for container {}. Response: {}".format(container, row)
logging.error(msg)
locationByContainer.setdefault(container, None)
continue
try:
locationByContainer.setdefault(container, [])
for item in parseNewLineJson(row['data']):
if item['state'] in ["U", "SUSPENDED", "R", "REPLICATING", "I", "INJECT"]:
msg = "Container %s has a rule ID %s in state %s. Will try to create a new rule."
logging.warning(msg, container, item['id'], item['state'])
continue
elif item['state'] in ["S", "STUCK"]:
if item['error'] == 'NO_SOURCES:NO_SOURCES':
msg = "Container {} has a STUCK rule with NO_SOURCES.".format(container)
msg += " Data could be lost forever... Rule info is: {}".format(item)
logging.warning(msg)
continue
# then calculate for how long it's been stuck
utcTimeNow = int(datetime.datetime.utcnow().strftime('%s'))
if item['stuck_at']:
stuckAt = stringDateToEpoch(item['stuck_at'])
else:
# consider it to be stuck since its creation
stuckAt = stringDateToEpoch(item['created_at'])
daysStuck = (utcTimeNow - stuckAt) // (24 * 60 * 60)
if daysStuck > STUCK_LIMIT:
msg = "Container {} has a STUCK rule for {} days (limit set to: {}).".format(container,
daysStuck,
STUCK_LIMIT)
msg += " Not going to use it! Rule info: {}".format(item)
logging.warning(msg)
continue
else:
msg = "Container {} has a STUCK rule for only {} days.".format(container, daysStuck)
msg += " Considering it for the pileup location"
logging.info(msg)
else:
logging.info("Container %s has rule ID %s in state %s, using it.",
container, item['id'], item['state'])
### NOTE: this is not an RSE name, but an RSE expression that still needs to be resolved
locationByContainer[container].append(item['rse_expression'])
except Exception as exc:
msg = "listReplicationRules function did not return a valid response for container: %s."
msg += "Server responded with: %s\nError: %s"
logging.exception(msg, container, str(exc), row['data'])
locationByContainer.setdefault(container, None)
continue
return locationByContainer
def getPileupSubscriptionsRucio(datasets, rucioUrl, rucioToken, scope="cms"):
"""
Provided a list of datasets (containers), resolve their blocks and find the
locations (RSEs) that host an available replica of every block.
:param datasets: list of dataset names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary of datasets and a list of their location.
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
# FIXME: we should definitely make a feature request to Rucio...
# so much, just to get the final RSEs for a container!!!
locationByDset = {}
if not datasets:
return locationByDset
headers = {"X-Rucio-Auth-Token": rucioToken}
# first, resolve the dataset into blocks
blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope)
urls = []
for _dset, blocks in viewitems(blocksByDset):
if blocks:
for block in blocks:
urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
# this is going to be bloody expensive in terms of HTTP requests
logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
block = row['url'].split("/{}/".format(scope))[1]
block = unquote(re.sub("/datasets$", "", block, 1))
container = block.split("#")[0]
locationByDset.setdefault(container, set())
if row['data'] is None:
msg = "Failure in getPileupSubscriptionsRucio container {} and block {}.".format(container, block)
msg += " Response: {}".format(row)
logging.error(msg)
locationByDset[container] = None
continue
if locationByDset[container] is None:
# then one of the block requests failed, skip the whole dataset
continue
thisBlockRSEs = set()
for item in parseNewLineJson(row['data']):
if item['state'] == "AVAILABLE":
thisBlockRSEs.add(item["rse"])
logging.info("Block: %s is available at: %s", block, thisBlockRSEs)
# now we have the final block location
if not locationByDset[container]:
# then this is the first block of this dataset
locationByDset[container] = thisBlockRSEs
else:
# otherwise, make an intersection of them
locationByDset[container] = locationByDset[container] & thisBlockRSEs
return locationByDset
def getBlocksAndSizeRucio(containers, rucioUrl, rucioToken, scope="cms"):
"""
Given a list of containers, find all their correspondent blocks and their sizes.
:param containers: list of container names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary in the form of:
{"dataset":
{"block":
{"blockSize": 111, "locations": ["x", "y"]}
}
}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
NOTE2: meant to return an output similar to Common.getBlockReplicasAndSize
"""
contBlockSize = {}
if not containers:
return contBlockSize
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = []
for cont in containers:
urls.append('{}/dids/{}/dids/search?type=dataset&long=True&name={}'.format(rucioUrl, scope, quote(cont + "#*")))
logging.info("Executing %d requests against Rucio DIDs search API for containers", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = row['url'].split("name=")[1]
container = unquote(container).replace("#*", "")
contBlockSize.setdefault(container, {})
if row['data'] in [None, ""]:
msg = "Failure in getBlocksAndSizeRucio function for container {}. Response: {}".format(container, row)
logging.error(msg)
contBlockSize[container] = None
continue
for item in parseNewLineJson(row['data']):
# NOTE: we do not care about primary block location in Rucio
contBlockSize[container][item['name']] = {"blockSize": item['bytes'], "locations": []}
return contBlockSize
### NOTE: likely not going to be used for a while
def getContainerBlocksRucio(containers, rucioUrl, rucioToken, scope="cms"):
"""
Provided a list of containers, find all their blocks.
:param containers: list of container names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary key'ed by the datasets with a list of blocks.
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
blocksByDset = {}
if not containers:
return blocksByDset
headers = {"X-Rucio-Auth-Token": rucioToken}
urls = ['{}/dids/{}/{}/dids'.format(rucioUrl, scope, cont) for cont in containers]
logging.info("Executing %d requests against Rucio DIDs API for blocks in containers", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
container = row['url'].split("/{}/".format(scope))[1]
container = re.sub("/dids$", "", container, 1)
if not row['data']:
logging.warning("Dataset: %s has no blocks in Rucio", container)
blocksByDset.setdefault(container, [])
for item in parseNewLineJson(row['data']):
blocksByDset[container].append(item["name"])
return blocksByDset
### NOTE: likely not going to be used for a while
def getBlockReplicasAndSizeRucio(datasets, rucioUrl, rucioToken, scope="cms"):
"""
Given a list of datasets, find all their blocks with replicas
available.
:param datasets: list of dataset names
:param rucioUrl: a string with the Rucio URL
:param rucioToken: a string with the user rucio token
:param scope: a string with the Rucio scope of our data
:return: a dictionary in the form of:
{"dataset":
{"block":
{"blockSize": 111, "locations": ["x", "y"]}
}
}
NOTE: Value `None` is returned in case the data-service failed to serve a given request.
"""
dsetBlockSize = {}
if not datasets:
return dsetBlockSize
headers = {"X-Rucio-Auth-Token": rucioToken}
# first, figure out their block names
blocksByDset = getContainerBlocksRucio(datasets, rucioUrl, rucioToken, scope=scope)
urls = []
for _dset, blocks in viewitems(blocksByDset):
for block in blocks:
urls.append('{}/replicas/{}/{}/datasets'.format(rucioUrl, scope, quote(block)))
# next, query the replicas API for the block location
# this is going to be bloody expensive in terms of HTTP requests
logging.info("Executing %d requests against Rucio replicas API for blocks", len(urls))
data = multi_getdata(urls, ckey(), cert(), headers=headers)
for row in data:
block = row['url'].split("/{}/".format(scope))[1]
block = unquote(re.sub("/datasets$", "", block, 1))
container = block.split("#")[0]
dsetBlockSize.setdefault(container, dict())
if row['data'] is None:
msg = "Failure in getBlockReplicasAndSizeRucio for container {} and block {}.".format(container, block)
msg += " Response: {}".format(row)
logging.error(msg)
dsetBlockSize[container] = None
continue
if dsetBlockSize[container] is None:
# then one of the block requests failed, skip the whole dataset
continue
thisBlockRSEs = []
blockBytes = 0
for item in parseNewLineJson(row['data']):
blockBytes = item['bytes']
if item['state'] == "AVAILABLE":
thisBlockRSEs.append(item["rse"])
# now we have the final block location
if not blockBytes and not thisBlockRSEs:
logging.warning("Block: %s has no replicas and no size", block)
else:
dsetBlockSize[container][block] = {"locations": thisBlockRSEs, "blockSize": blockBytes}
return dsetBlockSize | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/Tools/PycurlRucio.py | 0.661923 | 0.218305 | PycurlRucio.py | pypi |
from __future__ import print_function, division
# system modules
import os
import re
# WMCore modules
from WMCore.REST.Server import RESTFrontPage
class FrontPage(RESTFrontPage):
"""MicroService front page.
MicroService provides only one web page, the front page. The page just
loads the javascript user interface, complete with CSS and all JS
code embedded into it.
The JavaScript code performs all the app functionality via the REST
interface defined by the :class:`~.Data` class.
"""
def __init__(self, app, config, mount):
"""
:arg app: reference to the application object.
:arg config: reference to the configuration.
:arg str mount: URL mount point."""
mainroot = 'microservice' # entry point in access URL
wpath = os.getenv('MS_STATIC_ROOT', '')
print(wpath)
if not wpath:
content = os.path.abspath(__file__).rsplit('/', 5)[0]
xlib = (__file__.find("/xlib/") >= 0 and "x") or ""
wpath = "%s/%sdata/" % (content, xlib)
if not wpath.endswith('/'):
wpath += '/'
print(self.__class__.__name__, "static content: %s" % wpath)
mdict = {"root": wpath, "rx": re.compile(r"^[a-z]+/[-a-z0-9]+\.(?:html)$")}
tdict = {"root": wpath + "templates/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:html|tmpl)$")}
jdict = {"root": wpath + "js/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:js)$")}
cdict = {"root": wpath + "css/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\..*(?:css)$")}
idict = {"root": wpath + "images/",
"rx": re.compile(r"^([a-zA-Z]+/)*[-a-z0-9_]+\.(?:png|gif|jpg)$")}
roots = {mainroot: mdict, "templates": tdict,
"js": jdict, "css": cdict, "images": idict}
# location of frontpage in the root, e.g. microservice
frontpage = "%s/templates/index.html" % mainroot
RESTFrontPage.__init__(self, app, config, mount, frontpage, roots) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/WebGui/FrontPage.py | 0.585931 | 0.161816 | FrontPage.py | pypi |
from __future__ import division, print_function
from builtins import object, str, bytes
from future.utils import viewitems
from copy import deepcopy
from Utils.IteratorTools import flattenList
class WfParser(object):
"""
Workflow description parser class.
"""
def __init__(self, docSchema):
"""
The init method for the Workflow parser class.
:param docSchema: Document template in the form of a list of tuples as follows:
[('KeyName', DefaultValue, type),
('KeyName', DefaultValue, type),
...]
To be used for identifying the fields to be searched for
in the workflow description
"""
self.extDoc = {}
for tup in docSchema:
self.extDoc[tup[0]] = {'keyName': tup[0],
'values': list(),
'default': tup[1],
'type': tup[2]}
def __call__(self, wfDescr):
"""
The Call method for the Workflow parser class.
"""
self._paramFinder(wfDescr)
self._wfParse()
return self.extDoc
def _paramFinder(self, wfObj):
"""
Private method used to recursively traverse a workflow description
and search for all the keyNames defined in the extDoc auxiliary data
structure. If a 'keyName' happens to be present in several nested levels,
or in several similar objects from the same level (like {'Task1': {},
'Task2': {} ...), all the values found are accumulated in the respective
(flat) list at extDoc[keyName]['values'], which is later to be converted
to the originally expected type for the given field as described in the
Document Template
:param wfObj: Dictionary containing the workflow description
"""
if isinstance(wfObj, (list, set, tuple)):
for value in wfObj:
self._paramFinder(value)
if isinstance(wfObj, dict):
for key, value in viewitems(wfObj):
self._paramFinder(value)
for key in self.extDoc:
if key in wfObj:
self.extDoc[key]['values'].append(deepcopy(wfObj[key]))
def _wfParse(self):
"""
Workflow description parser. Given a document template representing all the
keyNames to be searched and a workflow description to search in recursively,
returns all the fields that it can find aggregated according to the rules below:
* if the number of found key instances is 0 - sets the default value from
the template.
* if the number of found key instances is 1 - sets the so found value from the
workflow description and converts it back to the form expected and described
in the template (removes the outermost list used for value aggregation)
* if the number of found key instances is > 1 - the values are aggregated
according to the expected types and data structure defined in the
template as follows:
* bool: sets it to True if any of the values found was set to True
* list: chains/flattens all the sub lists into a single list containing
all the values found
* dict: aggregates/flattens all the key-value pairs from all the
dictionaries found into one big dictionary
WARNING: (if an inner keyName happens to be found in multiple
dictionaries from the aggregated list of dictionaries
it will be overwritten with the values from the last
one to be merged into the finally constructed dictionary)!
* str: will be accumulated in a list containing all the values found
WARNING: (will change the expected structure of the field from
a single string to a list of strings)!
NOTE: This method takes no arguments; it operates on the values collected
in self.extDoc by _paramFinder and on the document template (the list
of ('KeyName', DefaultValue, type) tuples) passed to the constructor.
"""
# Convert back the so aggregated extDoc to the original structure:
for keyName, data in viewitems(self.extDoc):
if len(data['values']) == 0:
self.extDoc[keyName] = deepcopy(data['default'])
elif len(data['values']) == 1:
self.extDoc[keyName] = deepcopy(data['values'][0])
elif len(data['values']) > 1:
if data['type'] is bool:
self.extDoc[keyName] = any(data['values'])
elif data['type'] is list:
self.extDoc[keyName] = list(set(flattenList(data['values'])))
                    # WARNING: If this list happens to be built out of elements of
                    #          unhashable types (e.g. dict, list), the set() call will
                    #          raise an error. That is unlikely to happen though, see [1]:
                    #          all the fields we fetch from the nested Task/StepChain
                    #          dictionaries are of hashable types.
                    # [1] https://github.com/dmwm/WMCore/blob/ed40d33069bdddcd98ed5b8430d5ca6662e5941f/src/python/WMCore/WMSpec/StdSpecs/StdBase.py#L1189
elif data['type'] is dict:
self.extDoc[keyName] = {}
for item in data['values']:
self.extDoc[keyName].update(item)
elif (isinstance(data['type'], tuple) and (bytes in data['type'] or str in data['type'])) or \
(data['type'] is bytes or data['type'] is str):
data['values'] = list(set(data['values']))
if len(data['values']) == 1:
self.extDoc[keyName] = deepcopy(data['values'][0])
else:
self.extDoc[keyName] = deepcopy(data['values'])
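# --- Illustrative sketch, not part of the original WMCore module ---
# A hedged example of how WfParser aggregates a key that shows up in several
# task dictionaries. The schema and workflow description below are hypothetical:
#
#     schema = [('RequestName', None, (bytes, str)),
#               ('OutputDatasets', [], list),
#               ('IncludeParents', False, bool)]
#     wfDescr = {'RequestName': 'test_wf',
#                'Task1': {'OutputDatasets': ['/Prim/Era-v1/RECO']},
#                'Task2': {'OutputDatasets': ['/Prim/Era-v1/AOD'],
#                          'IncludeParents': True}}
#     parsedDoc = WfParser(schema)(wfDescr)
#     # parsedDoc['RequestName']    -> 'test_wf' (single value found)
#     # parsedDoc['OutputDatasets'] -> both datasets, flattened and de-duplicated
#     # parsedDoc['IncludeParents'] -> True (multiple booleans would be combined with any())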
class MSRuleCleanerWflow(dict):
"""
A minimal workflow and transfer information representation to serve the needs
of the MSRuleCleaner Micro Service.
"""
def __init__(self, wfDescr, **kwargs):
super(MSRuleCleanerWflow, self).__init__(**kwargs)
# Search for all the keys we need from the ReqManager workflow description
wfParser = WfParser(self.docSchema())
myDoc = wfParser(wfDescr)
# Convert some fields to lists explicitly:
# NOTE: Those are fields defined as strings in the original workflow
# representation, but may turn into lists during the recursive
# search and we will use them as lists for the rest of the code.
for key in ['DataPileup', 'MCPileup', 'ParentDataset']:
if not isinstance(myDoc[key], list):
if myDoc[key] is None:
myDoc[key] = []
else:
myDoc[key] = [myDoc[key]]
self.update(myDoc)
def docSchema(self):
"""
Return the data schema for the document.
It's a tuple where:
* 1st element: is the key name / attribute in the request
* 2nd element: is the default value
* 3rd element: is the expected data type
Document format:
{
"RequestName": "ReqName",
"RequestType": "Type",
"RequestStatus": "Status",
"OutputDatasets": [],
'RulesToClean': {'plineMSTrCont': [],
'plineMSTrBlock': [],
'plineAgentCont': [],
'plineAgentBlock': []},
'CleanupStatus': {'plineMSTrCont': False,
'plineMSTrBlock': False,
'plineAgentCont': False,
'plineAgentBlock': False},
"TransferDone": False # information - returned by the MSOutput REST call.
"TransferTape": False # information - fetched by Rucio about tape rules completion
            'TargetStatus': 'normal-archived' || 'rejected-archived' || 'aborted-archived',
'ParentageResolved': Bool,
'PlineMarkers': None,
'IsClean': False
'IsLogDBClean': False,
'IsArchivalDelayExpired': False,
'ForceArchive': False,
'RequestTransition': [],
'IncludeParents': False
'DataPileup': [],
'MCPileup': [],
'InputDataset': None,
'ParentDataset': []
}
:return: a list of tuples
"""
docTemplate = [
('RequestName', None, (bytes, str)),
('RequestType', None, (bytes, str)),
('RequestStatus', None, (bytes, str)),
('OutputDatasets', [], list),
('RulesToClean', {}, dict),
('CleanupStatus', {}, dict),
('TransferDone', False, bool),
('TransferTape', False, bool),
('TargetStatus', None, (bytes, str)),
('ParentageResolved', True, bool),
('PlineMarkers', None, list),
('IsClean', False, bool),
('IsLogDBClean', False, bool),
('IsArchivalDelayExpired', False, bool),
('ForceArchive', False, bool),
('RequestTransition', [], list),
('IncludeParents', False, bool),
('DataPileup', None, (bytes, str)),
('MCPileup', None, (bytes, str)),
('InputDataset', None, (bytes, str)),
('ParentDataset', None, (bytes, str))]
        # NOTE: ParentageResolved is set to True by default; it will be False only if:
# - RequestType is StepChain
# - The parent workflow is still in a transient status
# this should be one of the flags to be used to estimate if
# the workflow is good for archival
return docTemplate | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/MSRuleCleaner/MSRuleCleanerWflow.py | 0.816662 | 0.496338 | MSRuleCleanerWflow.py | pypi |
from __future__ import print_function, division
from copy import deepcopy
import json
import re
from WMCore.MicroService.Tools.Common import getMSLogger
from WMCore.WMException import WMException
class RelValPolicyException(WMException):
"""
General exception to be raised when a flaw is found in the RelVal
output data placement policy
"""
pass
class RelValPolicy():
"""
This module will contain the RelVal output data placement policy, where
destinations will be decided according to the dataset datatier and the
container lifetime will be decided based on the sample type (pre-release
or not).
    It's supposed to hold a policy driven by dataset datatier, and its
    data structure looks like:
[{"datatier": "tier_1", "destinations": ["rse_name_1", "rse_name_2"]},
{"datatier": "tier_2", "destinations": ["rse_name_2"]},
{"datatier": "default", "destinations": ["rse_name_3"]}]
The lifetime policy data structure is something like:
[{"releaseType": "pre", "lifetimeSecs": 120},
{"releaseType": "default", "lifetimeSecs": 360}]
the 'default' key matches the case where a datatier is not specified
in the policy.
"""
def __init__(self, tierPolicy, lifetimeDesc, listDatatiers, listRSEs, logger=None):
"""
Given a policy data structure - as a list of dictionaries - it
will validate the policy, the datatiers and RSEs defined in it,
and it will convert the policy into a flat dictionary for easier
data lookup.
:param tierPolicy: list of dictionary items with the output rules
:param lifetimeDesc: list of dictionary items with the output
lifetime rules
:param listDatatiers: flat list of existent datatiers in DBS
:param listRSEs: flat list of existent Disk RSEs in Rucio
:param logger: logger object, if any
"""
self.origTierPolicy = deepcopy(tierPolicy)
self.origLifetimePolicy = deepcopy(lifetimeDesc)
self.logger = getMSLogger(verbose=False, logger=logger)
self._validateTierPolicy(tierPolicy, listDatatiers, listRSEs)
self.tierPolicy = self._convertTierPolicy(tierPolicy)
self._validateLifetimePolicy(lifetimeDesc)
self.lifeTPolicy = self._convertLifePolicy(lifetimeDesc)
# regex to match against CMSSW pre-releases only, e.g.: CMSSW_1_2_3_pre12
self.preRegex = re.compile(r'CMSSW(_\d+){3}_pre(\d+)$')
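    # --- Illustrative sketch, not part of the original WMCore module ---
    # Hypothetical construction and lookups, assuming the datatiers and RSEs
    # below exist in DBS and Rucio respectively:
    #
    #     policy = RelValPolicy(
    #         tierPolicy=[{"datatier": "GEN-SIM", "destinations": ["T1_US_FNAL_Disk"]},
    #                     {"datatier": "default", "destinations": ["T2_CH_CERN"]}],
    #         lifetimeDesc=[{"releaseType": "pre", "lifetimeSecs": 7 * 24 * 3600},
    #                       {"releaseType": "default", "lifetimeSecs": 90 * 24 * 3600}],
    #         listDatatiers=["GEN-SIM", "RECO"],
    #         listRSEs=["T1_US_FNAL_Disk", "T2_CH_CERN"])
    #     policy.getDestinationByDataset("/RelValX/CMSSW_12_0_0_pre4-v1/GEN-SIM")
    #     # -> ["T1_US_FNAL_Disk"]
    #     policy.getLifetimeByDataset("/RelValX/CMSSW_12_0_0_pre4-v1/GEN-SIM")
    #     # -> 604800 (the "pre" release lifetime, matched via the CMSSW_*_pre regex)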
def __str__(self):
"""
Stringify this object, printing the original policy
"""
objectOut = dict(originalTierPolicy=self.origTierPolicy, mappedTierPolicy=self.tierPolicy,
originalLifetimePolicy=self.origLifetimePolicy, mappedLifetimePolicy=self.lifeTPolicy)
return json.dumps(objectOut)
def _validateTierPolicy(self, policyDesc, validDBSTiers, validDiskRSEs):
"""
This method validates the overall policy data structure, including:
* internal and external data types
* whether the datatiers exist in DBS
* whether the RSEs exist in Rucio
:param policyDesc: list of dictionaries with the policy definition
:param validDBSTiers: list with existent DBS datatiers
:param validDiskRSEs: list with existent Rucio Disk RSEs
:return: nothing, but it will raise an exception if any validation fails
"""
if not isinstance(policyDesc, list):
msg = "The RelVal output data placement policy is not in the expected data type. "
msg += "Type expected: list, while the current data type is: {}. ".format(type(policyDesc))
msg += "This critical ERROR must be fixed."
raise RelValPolicyException(msg) from None
# policy must have a default/fallback destination for datatiers not explicitly listed
hasDefault = False
for item in policyDesc:
# validate the datatier
if not isinstance(item['datatier'], str):
msg = "The 'datatier' parameter must be a string, not {}.".format(type(item['datatier']))
raise RelValPolicyException(msg) from None
if item['datatier'] == "default":
hasDefault = True
elif item['datatier'] not in validDBSTiers:
raise RelValPolicyException("Datatier '{}' does not exist in DBS.".format(item['datatier']))
# validate the destinations
if not isinstance(item['destinations'], list):
msg = "The 'destinations' parameter must be a list, not {}".format(type(item['destinations']))
raise RelValPolicyException(msg) from None
for rseName in item['destinations']:
if rseName not in validDiskRSEs:
                    msg = "Destination '{}' does not exist in Rucio.".format(rseName)
raise RelValPolicyException(msg) from None
if hasDefault is False:
msg = "A 'default' key must be defined with default destinations."
raise RelValPolicyException(msg) from None
def _validateLifetimePolicy(self, lifeTDesc):
"""
This method validates the lifetime RelVal policy data structure.
[{"releaseType": "pre", "lifetimeSecs": 120},
{"releaseType": "default", "lifetimeSecs": 360}]
:param lifeTDesc: list of dictionaries with the lifetime policy definition
:return: nothing, but it will raise an exception if any validation fails
"""
if not isinstance(lifeTDesc, list):
msg = "The RelVal lifetime output data placement policy is not in the expected data type. "
msg += "Type expected: list, while the current data type is: {}. ".format(type(lifeTDesc))
msg += "This critical ERROR must be fixed."
raise RelValPolicyException(msg) from None
# policy must have a default/fallback destination for non pre-releases
setRelTypes = set()
expRelTypesKeys = {"pre", "default"}
for item in lifeTDesc:
if not isinstance(item.get('releaseType', None), str):
                msg = "The 'releaseType' parameter must be a string, not {}.".format(type(item.get('releaseType')))
raise RelValPolicyException(msg) from None
if item['releaseType'] not in expRelTypesKeys:
msg = "The 'releaseType' parameter does not match the expected values. "
msg += "Value provided '{}' not in {}.".format(item['releaseType'], expRelTypesKeys)
raise RelValPolicyException(msg) from None
if not isinstance(item['lifetimeSecs'], int):
msg = "The 'lifetimeSecs' parameter must be integer, not {}".format(type(item['lifetimeSecs']))
raise RelValPolicyException(msg) from None
if item['lifetimeSecs'] <= 0:
msg = "The 'lifetimeSecs' parameter cannot be 0 or negative"
raise RelValPolicyException(msg) from None
setRelTypes.add(item['releaseType'])
# last check, it must contain a policy for "pre" releases and "default"
if setRelTypes != expRelTypesKeys:
msg = "Policy must define rules for these 2 sample types: {}".format(expRelTypesKeys)
raise RelValPolicyException(msg) from None
def _convertTierPolicy(self, policyDesc):
"""
Maps the RelVal tier data policy to a flat dictionary key'ed by datatiers
:param policyDesc: list of dictionaries with the tier policy definition
:return: a dictionary with a map of the RelVal tier policy
"""
outputPolicy = dict()
for item in policyDesc:
outputPolicy.update({item['datatier']: item['destinations']})
return outputPolicy
def _convertLifePolicy(self, policyDesc):
"""
Maps the RelVal lifetime data policy to a flat dictionary key'ed
by the release cycle type (only supports pre or anything else).
:param policyDesc: list of dictionaries with the lifetime policy definition
:return: a dictionary with a map of the RelVal lifetime policy
"""
outputPolicy = dict()
for item in policyDesc:
outputPolicy.update({item['releaseType']: item['lifetimeSecs']})
return outputPolicy
def getDestinationByDataset(self, dsetName):
"""
Provided a dataset name, return the destination defined for its datatier.
:param dsetName: a string with the full dataset name
:return: a list of locations
"""
_, dsn, procString, dataTier = dsetName.split('/')
return self.tierPolicy.get(dataTier, self.tierPolicy['default'])
def _isPreRelease(self, dsetName):
"""
Helper function to determine whether the provided dataset name
belongs to a pre-release or not.
:param dsetName: string with the dataset name
:return: boolean whether it's a pre-release sample or not.
"""
try:
procString = dsetName.split('/')[2]
acqEra = procString.split('-')[0]
except Exception:
raise RuntimeError("RelVal dataset name invalid: {}".format(dsetName)) from None
return bool(self.preRegex.match(acqEra))
def getLifetimeByDataset(self, dsetName):
"""
Provided a dataset name, return the rule lifetime defined for
this sample/release type.
:param dsetName: a string with the full dataset name
:return: an integer with the lifetime in seconds
"""
if self._isPreRelease(dsetName):
return self.lifeTPolicy["pre"]
return self.lifeTPolicy["default"] | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/MSOutput/RelValPolicy.py | 0.68679 | 0.358409 | RelValPolicy.py | pypi |
from __future__ import division, print_function
from future.utils import listvalues, listitems
from future import standard_library
standard_library.install_aliases()
# system modules
from operator import itemgetter
from pprint import pformat
from retry import retry
from copy import deepcopy
# WMCore modules
from Utils.IteratorTools import grouper
from WMCore.MicroService.DataStructs.DefaultStructs import TRANSFEROR_REPORT,\
TRANSFER_RECORD, TRANSFER_COUCH_DOC
from WMCore.MicroService.Tools.Common import gigaBytes, teraBytes
from WMCore.MicroService.MSCore import MSCore
from WMCore.MicroService.MSTransferor.RequestInfo import RequestInfo
from WMCore.MicroService.MSTransferor.DataStructs.RSEQuotas import RSEQuotas
from WMCore.Services.CRIC.CRIC import CRIC
def newTransferRec(dataIn):
"""
Create a basic transfer record to be appended to a transfer document
:param dataIn: dictionary with information relevant to this transfer doc
:return: a transfer record dictionary
"""
record = deepcopy(TRANSFER_RECORD)
record["dataset"] = dataIn['name']
record["dataType"] = dataIn['type']
record["campaignName"] = dataIn['campaign']
return record
def newTransferDoc(reqName, transferRecords):
"""
Create a transfer document which is meant to be created in
central CouchDB
:param reqName: string with the workflow name
:param transferRecords: list of dictionaries with transfer records
:return: a transfer document dictionary
"""
doc = dict(TRANSFER_COUCH_DOC)
doc["workflowName"] = reqName
doc["transfers"] = transferRecords
return doc
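# --- Illustrative sketch, not part of the original WMCore module ---
# Rough shape of a transfer document assembled with the helpers above. The exact
# fields come from DefaultStructs.TRANSFER_RECORD/TRANSFER_COUCH_DOC, so treat
# the dictionary contents below as a hypothetical example only:
#
#     dataIn = {'name': '/Prim/Era-v1/MINIAOD', 'type': 'primary', 'campaign': 'CampaignA'}
#     rec = newTransferRec(dataIn)
#     rec['transferIDs'] = ['hypothetical_rule_id']
#     doc = newTransferDoc('test_workflow', [rec])
#     # doc['workflowName']            -> 'test_workflow'
#     # doc['transfers'][0]['dataset'] -> '/Prim/Era-v1/MINIAOD'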
class MSTransferor(MSCore):
"""
MSTransferor class provide whole logic behind
the transferor module.
"""
def __init__(self, msConfig, logger=None):
"""
Runs the basic setup and initialization for the MS Transferor module
        :param msConfig: microservice configuration
"""
super(MSTransferor, self).__init__(msConfig, logger=logger)
# minimum percentage completion for dataset/blocks subscribed
self.msConfig.setdefault("minPercentCompletion", 99)
# minimum available storage to consider a resource good for receiving data
self.msConfig.setdefault("minimumThreshold", 1 * (1000 ** 4)) # 1TB
# limit MSTransferor to this amount of requests per cycle
self.msConfig.setdefault("limitRequestsPerCycle", 500)
# Send warning messages for any data transfer above this threshold.
# Set to negative to ignore.
self.msConfig.setdefault("warningTransferThreshold", 100. * (1000 ** 4)) # 100TB
# weight expression for the input replication rules
self.msConfig.setdefault("rucioRuleWeight", 'ddm_quota')
# Workflows with open running timeout are used for growing input dataset, thus
# make a container level rule for the whole container whenever the open running
# timeout is larger than what is configured (or the default of 7 days below)
self.msConfig.setdefault("openRunning", 7 * 24 * 60 * 60)
quotaAccount = self.msConfig["rucioAccount"]
self.rseQuotas = RSEQuotas(quotaAccount, self.msConfig["quotaUsage"],
minimumThreshold=self.msConfig["minimumThreshold"],
verbose=self.msConfig['verbose'], logger=logger)
self.reqInfo = RequestInfo(self.msConfig, self.rucio, self.logger)
self.cric = CRIC(logger=self.logger)
self.inputMap = {"InputDataset": "primary",
"MCPileup": "secondary",
"DataPileup": "secondary"}
self.uConfig = {}
self.campaigns = {}
self.psn2pnnMap = {}
self.pnn2psnMap = {}
self.dsetCounter = 0
self.blockCounter = 0
# service name used to route alerts via AlertManager
self.alertServiceName = "ms-transferor"
@retry(tries=3, delay=2, jitter=2)
def updateCaches(self):
"""
Fetch some data required for the transferor logic, e.g.:
* account limits from Rucio
* account usage from Rucio
* unified configuration
* all campaign configuration
* PSN to PNN map from CRIC
"""
self.logger.info("Updating RSE/PNN quota and usage")
self.rseQuotas.fetchStorageQuota(self.rucio)
self.rseQuotas.fetchStorageUsage(self.rucio)
self.rseQuotas.evaluateQuotaExceeded()
if not self.rseQuotas.getNodeUsage():
raise RuntimeWarning("Failed to fetch storage usage stats")
self.logger.info("Updating all local caches...")
self.dsetCounter = 0
self.blockCounter = 0
self.uConfig = self.unifiedConfig()
campaigns = self.reqmgrAux.getCampaignConfig("ALL_DOCS")
self.psn2pnnMap = self.cric.PSNtoPNNMap()
self.pnn2psnMap = self.cric.PNNtoPSNMap()
if not self.uConfig:
raise RuntimeWarning("Failed to fetch the unified configuration")
elif not campaigns:
raise RuntimeWarning("Failed to fetch the campaign configurations")
elif not self.psn2pnnMap:
raise RuntimeWarning("Failed to fetch PSN x PNN map from CRIC")
else:
# let's make campaign look-up easier and more efficient
self.campaigns = {}
for camp in campaigns:
self.campaigns[camp['CampaignName']] = camp
self.rseQuotas.printQuotaSummary()
def execute(self, reqStatus):
"""
Executes the whole transferor logic
:param reqStatus: request status to process
:return:
"""
counterWorkflows = 0
counterFailedRequests = 0
counterProblematicRequests = 0
counterSuccessRequests = 0
summary = dict(TRANSFEROR_REPORT)
self.logger.info("Service set to process up to %s requests per cycle.",
self.msConfig["limitRequestsPerCycle"])
try:
requestRecords = self.getRequestRecords(reqStatus)
self.updateReportDict(summary, "total_num_requests", len(requestRecords))
self.logger.info("Retrieved %s requests.", len(requestRecords))
except Exception as err: # general error
requestRecords = []
            msg = "Unknown exception while fetching requests from ReqMgr2. Error: %s" % str(err)
self.logger.exception(msg)
self.updateReportDict(summary, "error", msg)
try:
self.updateCaches()
self.updateReportDict(summary, "total_num_campaigns", len(self.campaigns))
self.updateReportDict(summary, "nodes_out_of_space", list(self.rseQuotas.getOutOfSpaceRSEs()))
except RuntimeWarning as ex:
msg = "All retries exhausted! Last error was: '%s'" % str(ex)
msg += "\nRetrying to update caches again in the next cycle."
self.logger.error(msg)
self.updateReportDict(summary, "error", msg)
return summary
except Exception as ex:
msg = "Unknown exception updating caches. Error: %s" % str(ex)
self.logger.exception(msg)
self.updateReportDict(summary, "error", msg)
return summary
# process all requests
for reqSlice in grouper(requestRecords, 100):
self.logger.info("Processing workflows from %d to %d.",
counterWorkflows + 1, counterWorkflows + len(reqSlice))
# get complete requests information
# based on Unified Transferor logic
reqResults = self.reqInfo(reqSlice)
self.logger.info("%d requests information completely processed.", len(reqResults))
for wflow in reqResults:
if not self.verifyCampaignExist(wflow):
counterProblematicRequests += 1
continue
# first, check whether any pileup dataset is already in place
self.checkPUDataLocation(wflow)
if wflow.getSecondarySummary() and not wflow.getPURSElist():
# then we still have pileup to be transferred, but with incorrect locations
self.alertPUMisconfig(wflow.getName())
counterProblematicRequests += 1
continue
# now check where input primary and parent blocks will need to go
self.checkDataLocation(wflow)
try:
success, transfers = self.makeTransferRequest(wflow)
except Exception as ex:
success = False
self.alertUnknownTransferError(wflow.getName())
msg = "Unknown exception while making transfer request for %s " % wflow.getName()
                    msg += "\tError: %s" % str(ex)
self.logger.exception(msg)
if success:
# then create a document in ReqMgr Aux DB
self.logger.info("Transfer requests successful for %s. Summary: %s",
wflow.getName(), pformat(transfers))
if self.createTransferDoc(wflow.getName(), transfers):
self.logger.info("Transfer document successfully created in CouchDB for: %s", wflow.getName())
# then move this request to staging status
self.change(wflow.getName(), 'staging', self.__class__.__name__)
counterSuccessRequests += 1
else:
counterFailedRequests += 1
self.alertTransferCouchDBError(wflow.getName())
else:
counterFailedRequests += 1
# it can go slightly beyond the limit. It's evaluated for every slice
if counterSuccessRequests >= self.msConfig["limitRequestsPerCycle"]:
msg = "Transferor succeeded acting on %d workflows in this cycle. " % counterSuccessRequests
msg += "Which exceeds the configuration limit set to: %s" % self.msConfig["limitRequestsPerCycle"]
self.logger.info(msg)
break
counterWorkflows += len(reqSlice)
self.logger.info("Summary for this cycle is:")
self.logger.info(" * there were %d problematic requests;", counterProblematicRequests)
self.logger.info(" * there were %d failed requests;", counterFailedRequests)
self.logger.info(" * there were %d successful requests;", counterSuccessRequests)
self.logger.info(" * a total of %d datasets were subscribed;", self.dsetCounter)
self.logger.info(" * a total of %d blocks were subscribed.", self.blockCounter)
self.updateReportDict(summary, "success_request_transition", counterSuccessRequests)
self.updateReportDict(summary, "failed_request_transition", counterFailedRequests)
self.updateReportDict(summary, "problematic_requests", counterProblematicRequests)
self.updateReportDict(summary, "num_datasets_subscribed", self.dsetCounter)
self.updateReportDict(summary, "num_blocks_subscribed", self.blockCounter)
self.updateReportDict(summary, "nodes_out_of_space", list(self.rseQuotas.getOutOfSpaceRSEs()))
return summary
def getRequestRecords(self, reqStatus):
"""
Queries ReqMgr2 for requests in a given status, sort them by priority
and return a subset of each request with important information for the
data placement algorithm.
"""
self.logger.info("Fetching requests in status: %s", reqStatus)
# get requests from ReqMgr2 data-service for given status
reqData = self.reqmgr2.getRequestByStatus([reqStatus], detail=True)
# we need to first put these requests in order of priority, as done for GQ...
orderedRequests = []
for requests in reqData:
orderedRequests = listvalues(requests)
orderedRequests.sort(key=itemgetter('RequestPriority'), reverse=True)
return orderedRequests
def verifyCampaignExist(self, wflow):
"""
Check whether the campaigns associated to all the input datasets
exist in the database.
:param wflow: a workflow object
:return: True if campaigns exist, False otherwise
"""
for dataIn in wflow.getDataCampaignMap():
if dataIn['campaign'] not in self.campaigns:
msg = "Workflow: %s has to transfer dataset: %s under the campaign: %s. "
msg += "This campaign does not exist and needs to be created. Skipping this workflow!"
self.logger.warning(msg, wflow.getName(), dataIn['name'], dataIn['campaign'])
return False
return True
def checkDataLocation(self, wflow):
"""
Check which data is already in place (according to the site lists
and pileup data location) and remove them from the data placement
if already available anywhere.
If workflow has XRootD/AAA enabled, data location can be outside of
the SiteWhitelist.
:param wflow: workflow object
:return: None
"""
if not wflow.getInputDataset():
return
wflowPnns = self._getPNNsFromPSNs(wflow.getSitelist())
primaryAAA = wflow.getReqParam("TrustSitelists")
msg = "Checking data location for request: %s, TrustSitelists: %s, request white/black list PNNs: %s"
self.logger.info(msg, wflow.getName(), primaryAAA, wflowPnns)
        if wflow.getPURSElist():
            # the pileup location has already been decided, so primary/parent
            # data placement must be restricted to those same RSEs
            wflowPnns = wflow.getPURSElist()
for methodName in ("getPrimaryBlocks", "getParentBlocks"):
inputBlocks = getattr(wflow, methodName)()
self.logger.info("Request %s has %d initial blocks from %s",
wflow.getName(), len(inputBlocks), methodName)
for block, blockDict in listitems(inputBlocks): # dict can change size here
blockLocation = self._diskPNNs(blockDict['locations'])
if primaryAAA and blockLocation:
msg = "Primary/parent block %s already in place (via AAA): %s" % (block, blockLocation)
self.logger.info(msg)
inputBlocks.pop(block)
elif blockLocation:
commonLocation = wflowPnns & set(blockLocation)
if commonLocation:
self.logger.info("Primary/parent block %s already in place: %s", block, commonLocation)
inputBlocks.pop(block)
else:
self.logger.info("block: %s will need data placement!!!", block)
else:
self.logger.info("Primary/parent block %s not available in any disk storage", block)
self.logger.info("Request %s has %d final blocks from %s",
wflow.getName(), len(getattr(wflow, methodName)()), methodName)
def checkPUDataLocation(self, wflow):
"""
Check the workflow pileup current location, compare it to what is defined
in the campaign configuration and ensure that each location defined in the
campaign gets a rule created, regardless whether AAA is enabled or not.
Use the workflow sitelists and the expected pileup(s) location(s) to decide
where primary and parent data must be placed.
:param wflow: workflow object
:return: None
"""
pileupInput = wflow.getSecondarySummary()
if not pileupInput:
# nothing to be done here
return
psns = wflow.getSitelist()
wflowPnns = self._getPNNsFromPSNs(psns)
secAAA = wflow.getReqParam("TrustPUSitelists")
msg = "Checking secondary data location for request: {}, ".format(wflow.getName())
msg += "TrustPUSitelists: {}, request white/black list PNNs: {}".format(secAAA, wflowPnns)
self.logger.info(msg)
# this variable will contain a set of each pileup location, according
# to what has been defined in the campaign configuration. In the end,
# their intersection will be the final location for primary and parents
campBasedLocation = []
for dataIn in wflow.getDataCampaignMap():
if dataIn["type"] == "secondary":
dsetName = dataIn["name"]
campConfig = self.campaigns[dataIn['campaign']]
secSize = pileupInput[dsetName]['dsetSize']
secLocation = pileupInput[dsetName]['locations']
# and a special case for RelVal workflows, which do not define
# secondary datasets and their location
if wflow.isRelVal():
campSecLocations = wflowPnns
else:
campSecLocations = campConfig['Secondaries'].get(dsetName, [])
campBasedLocation.append(set(campSecLocations))
if not campSecLocations:
msg = "Workflow has been incorrectly assigned: %s. The secondary dataset: %s, "
msg += "belongs to the campaign: %s, which does not define the secondary "
msg += "dataset or it has defined an empty location list."
self.logger.error(msg, wflow.getName(), dsetName, dataIn['campaign'])
return
# compare the expected locations against the current availability
missingDestinations = list(set(campSecLocations) - set(secLocation))
msg = "it has secondary pileup: %s, with a total size of: %s GB, "
msg += "currently at: %s, campaign expected at: %s and missing replicas at: %s"
self.logger.info(msg, dsetName, gigaBytes(secSize), secLocation,
campSecLocations, missingDestinations)
if missingDestinations:
# then update this pileup location to get rule(s) on it
self.logger.info("pileup %s will get container rules on: %s", dsetName, missingDestinations)
pileupInput[dsetName]['locations'] = missingDestinations
else:
self.logger.info("pileup %s already available at the expected locations", dsetName)
# then remove it from the samples to get a data placement rule
pileupInput.pop(dsetName)
# consider the workflow sitelist for this final location
if len(campBasedLocation) == 1:
# then there is only one pileup dataset in the workflow
wflowFinalLocation = campBasedLocation[0] & wflowPnns
else:
# then there are multiple pileups, meaning possibly different
# campaigns and expected locations. Use their location
# intersection as final workflow destination
wflowFinalLocation = campBasedLocation[0].intersection(*campBasedLocation)
self.logger.info("Final location for workflow: %s is: %s", wflow.getName(), wflowFinalLocation)
wflow.setPURSElist(wflowFinalLocation)
def makeTransferRequest(self, wflow):
"""
Checks which input data has to be transferred, select the final destination if needed,
create the transfer record to be stored in Couch, and create the DM placement request.
This method does the following:
1. return if there is no workflow data to be transferred
2. check if the data input campaign is in the database, skip if not
3. _getValidSites: using the workflow site lists and the campaign configuration,
find a common list of sites (converted to PNNs). If the PNN is out of quota,
it's also removed from this list
4. create the transfer record dictionary
5. for every final node
5.1. if it's a pileup dataset, pick a random node and subscribe the whole container
5.2. else, retrieve chunks of blocks to be subscribed (evenly distributed)
5.3. update node usage with the amount of data subscribed
6. re-evaluate nodes with quota exceeded
7. return the transfer record, with a list of transfer IDs
:param wflow: workflow object
:return: boolean whether it succeeded or not, and a list of transfer records
"""
response = []
success = True
if not (wflow.getParentBlocks() or wflow.getPrimaryBlocks() or wflow.getSecondarySummary()):
self.logger.info("Request %s does not have any further data to transfer", wflow.getName())
return success, response
self.logger.info("Handling data subscriptions for request: %s", wflow.getName())
for dataIn in wflow.getDataCampaignMap():
dsetName = dataIn['name']
if dataIn["type"] == "parent":
msg = "Skipping 'parent' data subscription (done with the 'primary' data), for: %s" % dataIn
self.logger.info(msg)
continue
elif dataIn["type"] == "secondary" and dsetName not in wflow.getSecondarySummary():
# secondary already in place
continue
if wflow.getPURSElist() and not wflow.isRelVal():
rses = list(wflow.getPURSElist() & self.rseQuotas.getAvailableRSEs())
else:
rses = self._getValidSites(wflow, dataIn)
if not rses:
msg = "Workflow: %s can only run in RSEs with no available space: %s. "
msg += "Skipping this workflow until space gets released"
self.logger.warning(msg, wflow.getName(), wflow.getPURSElist())
return False, response
# create a transfer record data structure
transRec = newTransferRec(dataIn)
# figure out dids, number of copies and which grouping to use
if dataIn["type"] == "primary":
dids, didsSize = wflow.getInputData()
grouping = wflow.getRucioGrouping()
copies = wflow.getReplicaCopies()
if not dids:
# no valid files in any blocks, it will likely fail in global workqueue
self.logger.warning(" found 0 primary/parent blocks for dataset: %s, moving on...", dataIn['name'])
return success, response
# then it's secondary type
else:
# we can have multiple pileup datasets
puSummary = wflow.getSecondarySummary()
dids = [dsetName]
didsSize = puSummary[dsetName]['dsetSize']
grouping = "ALL"
# one replica for each RSE
copies = len(rses)
success, transferId = self.makeTransferRucio(wflow, dataIn, dids, didsSize,
grouping, copies, rses)
if not success:
# stop any other data placement for this workflow
msg = "There were failures transferring data for workflow: %s. Will retry again later."
self.logger.warning(msg, wflow.getName())
break
if transferId:
if isinstance(transferId, (set, list)):
transRec['transferIDs'].update(transferId)
else:
transRec['transferIDs'].add(transferId)
# and update some instance caches
if dataIn["type"] == "secondary":
self.dsetCounter += 1
else:
self.blockCounter += len(dids)
transRec['transferIDs'] = list(transRec['transferIDs'])
response.append(transRec)
return success, response
def makeTransferRucio(self, wflow, dataIn, dids, dataSize, grouping, copies, nodes):
"""
Creates a Rucio replication rule
:param wflow: the workflow object
:param dataIn: short summary of the data to be placed
:param dids: a list of the DIDs to be added to the rule
:param dataSize: amount of data being placed by this rule
:param grouping: whether blocks need to be placed altogether (ALL)
or if the can be scattered around (DATASET).
:param copies: integer with the number of copies to use in the rule
:param nodes: list of nodes/RSE
:return: a boolean flagging whether it succeeded or not, and the rule id
"""
success, transferId = True, set()
ruleAttrs = {'copies': copies,
'activity': 'Production Input',
'lifetime': self.msConfig['rulesLifetime'],
'account': self.msConfig['rucioAccount'],
'grouping': grouping,
'weight': self.msConfig['rucioRuleWeight'],
'meta': {'workflow_group': wflow.getWorkflowGroup()},
'comment': 'WMCore MSTransferor input data placement'}
rseExpr = "|".join(nodes)
if self.msConfig.get('enableDataTransfer', True):
# Force request-only subscription
# to any data transfer going above some threshold (do not auto-approve)
aboveWarningThreshold = (self.msConfig.get('warningTransferThreshold') > 0. and
dataSize > self.msConfig.get('warningTransferThreshold'))
# Then make the data subscription, for real!!!
self.logger.info("Creating rule for workflow %s with %d DIDs in container %s, RSEs: %s, grouping: %s",
wflow.getName(), len(dids), dataIn['name'], rseExpr, grouping)
try:
res = self.rucio.createReplicationRule(dids, rseExpr, **ruleAttrs)
except Exception as exc:
msg = "Hit a bad exception while creating replication rules for DID: %s. Error: %s"
self.logger.error(msg, dids, str(exc))
success = False
else:
if res:
# it could be that some of the DIDs already had such rule in
# place, so we might be retrieving a bunch of rule ids instead of
# a single one
                    self.logger.info("Rules successfully created for %s : %s", dataIn['name'], res)
transferId.update(res)
# send an alert, if needed
self.alertLargeInputData(aboveWarningThreshold, transferId, wflow.getName(), dataSize, dataIn)
else:
self.logger.error("Failed to create rule for %s, will retry later", dids)
success = False
else:
msg = "DRY-RUN: making Rucio rule for workflow: %s, dids: %s, rse: %s, kwargs: %s"
self.logger.info(msg, wflow.getName(), dids, rseExpr, ruleAttrs)
return success, transferId
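    # --- Illustrative sketch, not part of the original WMCore module ---
    # Hypothetical RSE expression and grouping behaviour for the rule above:
    #
    #     nodes = ["T1_US_FNAL_Disk", "T2_CH_CERN"]
    #     rseExpr = "|".join(nodes)    # -> "T1_US_FNAL_Disk|T2_CH_CERN"
    #     # grouping "ALL":     Rucio keeps all blocks of the rule together at a
    #     #                     single RSE matching the expression (pileup containers)
    #     # grouping "DATASET": blocks may be scattered across the matching RSEs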
def alertPUMisconfig(self, workflowName):
"""
Send alert to Prometheus with PU misconfiguration error
"""
alertName = "{}: PU misconfiguration error. Workflow: {}".format(self.alertServiceName,
workflowName)
alertSeverity = "high"
alertSummary = "[MSTransferor] Workflow cannot proceed due to some PU misconfiguration."
alertDescription = "Workflow: {} could not proceed due to some PU misconfiguration,".format(workflowName)
alertDescription += "so it will be skipped."
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
self.logger.critical(alertDescription)
def alertUnknownTransferError(self, workflowName):
"""
Send alert to Prometheus with unknown transfer error
"""
alertName = "{}: Transfer request error. Workflow: {}".format(self.alertServiceName,
workflowName)
alertSeverity = "high"
alertSummary = "[MSTransferor] Unknown exception while making transfer request."
alertDescription = "Unknown exception while making Transfer request for workflow: {}".format(workflowName)
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
def alertTransferCouchDBError(self, workflowName):
"""
Send alert to Prometheus with CouchDB transfer error
"""
alertName = "{}: Failed to create a transfer document in CouchDB for workflow: {}".format(self.alertServiceName,
workflowName)
alertSeverity = "high"
alertSummary = "[MSTransferor] Transfer document could not be created in CouchDB."
alertDescription = "Workflow: {}, failed request due to error posting to CouchDB".format(workflowName)
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
self.logger.warning(alertDescription)
def alertLargeInputData(self, aboveWarningThreshold, transferId, wflowName, dataSize, dataIn):
"""
Evaluates whether the amount of data placed is too big, if so, send an alert
notification to a few persons
:param aboveWarningThreshold: boolean flag saying if the thresholds was exceeded or not
:param transferId: rule/transfer request id
:param wflowName: name of the workflow
:param dataSize: total amount of data subscribed
:param dataIn: short summary of the workflow data
"""
# Warn about data transfer subscriptions going above some threshold
if aboveWarningThreshold:
alertName = "{}: input data transfer over threshold: {}".format(self.alertServiceName,
wflowName)
alertSeverity = "high"
alertSummary = "[MS] Large pending data transfer under request id: {}".format(transferId)
alertDescription = "Workflow: {} has a large amount of ".format(wflowName)
alertDescription += "data subscribed: {} TB, ".format(teraBytes(dataSize))
alertDescription += "for {} data: {}.""".format(dataIn['type'], dataIn['name'])
self.sendAlert(alertName, alertSeverity, alertSummary, alertDescription,
self.alertServiceName)
self.logger.warning(alertDescription)
def _getValidSites(self, wflow, dataIn):
"""
        Given a workflow object and the data short summary, find out
        the campaign name and the workflow SiteWhitelist, map the PSNs to
        PNNs, and finally drop the PNNs that are out of space, keeping only
        those that can still receive data
:param wflow: the workflow object
:param dataIn: short summary of data to be transferred
:return: a unique and ordered list of PNNs to take data
"""
campConfig = self.campaigns[dataIn['campaign']]
psns = wflow.getSitelist()
if dataIn["type"] == "primary":
if campConfig['SiteWhiteList']:
psns = set(psns) & set(campConfig['SiteWhiteList'])
if campConfig['SiteBlackList']:
psns = set(psns) - set(campConfig['SiteBlackList'])
        self.logger.info(" final list of PSNs to be used: %s", psns)
pnns = self._getPNNsFromPSNs(psns)
if wflow.isRelVal():
self.logger.info("RelVal workflow '%s' ignores sites out of quota", wflow.getName())
return list(pnns)
self.logger.info("List of out-of-space RSEs dropped for '%s' is: %s",
wflow.getName(), pnns & self.rseQuotas.getOutOfSpaceRSEs())
return list(pnns & self.rseQuotas.getAvailableRSEs())
def createTransferDoc(self, reqName, transferRecords):
"""
Enrich the records returned from the data placement logic, wrap them up
in a single document and post it to CouchDB
:param reqName: the workflow name
:param transferRecords: list of dictionaries records, or empty if no input at all
:return: True if operation is successful, else False
"""
doc = newTransferDoc(reqName, transferRecords)
# Use the update/put method, otherwise it will fail if the document already exists
if self.reqmgrAux.updateTransferInfo(reqName, doc):
return True
self.logger.error("Failed to create transfer document in CouchDB. Will retry again later.")
return False
def _getPNNsFromPSNs(self, psnList):
"""
Given a list/set of PSNs, return a set of valid PNNs.
Note that T3, Tape and a few other PNNs are never returned.
"""
pnns = set()
for psn in psnList:
for pnn in self.psn2pnnMap.get(psn, []):
if pnn == "T2_CH_CERNBOX" or pnn.startswith("T3_"):
pass
elif pnn.endswith("_Tape") or pnn.endswith("_MSS") or pnn.endswith("_Export"):
pass
else:
pnns.add(pnn)
return pnns
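    # --- Illustrative sketch, not part of the original WMCore module ---
    # Hypothetical PSN to PNN map from CRIC and the filtering applied above:
    #
    #     self.psn2pnnMap = {"T2_CH_CERN": ["T2_CH_CERN", "T2_CH_CERNBOX"],
    #                        "T1_US_FNAL": ["T1_US_FNAL_Disk", "T1_US_FNAL_Tape"],
    #                        "T3_US_Baylor": ["T3_US_Baylor"]}
    #     self._getPNNsFromPSNs(["T2_CH_CERN", "T1_US_FNAL", "T3_US_Baylor"])
    #     # -> {"T2_CH_CERN", "T1_US_FNAL_Disk"}
    #     # (CERNBOX, Tape/MSS/Export endpoints and T3 sites are dropped)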
def _getPSNsFromPNNs(self, pnnList):
"""
Given a list/set of PNNs, return a set of valid PSNs.
Note that T3 sites are never returned.
"""
psns = set()
for pnn in pnnList:
for psn in self.pnn2psnMap.get(pnn, []):
if psn.startswith("T3_"):
pass
else:
psns.add(psn)
return psns
def _diskPNNs(self, pnnList):
"""
Provided a list of PNN locations, return another list of
PNNs without mass storage and T3 sites
:param pnnList: list of PNN strings
:return: a set of strings with filtered out PNNs
"""
diskPNNs = set()
for pnn in pnnList:
if pnn == "T2_CH_CERNBOX" or pnn.startswith("T3_"):
pass
elif pnn.endswith("_Tape") or pnn.endswith("_MSS") or pnn.endswith("_Export"):
pass
else:
diskPNNs.add(pnn)
return diskPNNs | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/MSTransferor/MSTransferor.py | 0.646906 | 0.172276 | MSTransferor.py | pypi |
from __future__ import division, print_function
from builtins import str as newstr, bytes, object
from future.utils import viewitems
from WMCore.MicroService.Tools.Common import getMSLogger, gigaBytes, teraBytes
class RSEQuotas(object):
"""
Class which represents a list of RSEs, their quota and
their storage usage
"""
def __init__(self, dataAcct, quotaFraction, **kwargs):
"""
Executes a basic setup, including proper logging.
:param dataAcct: string with the Rucio account
:param quotaFraction: float point number representing the fraction of the quota
:param kwargs: the supported keyword arguments are:
minimumThreshold: integer value defining the minimum available space required
verbose: logger verbosity
logger: logger object
"""
self.dataAcct = dataAcct
self.quotaFraction = quotaFraction
self.minimumSpace = kwargs["minimumThreshold"]
self.logger = getMSLogger(kwargs.get("verbose"), kwargs.get("logger"))
msg = "RSEQuotas started with parameters: dataAcct=%s, quotaFraction=%s, "
msg += "minimumThreshold=%s GB"
self.logger.info(msg, dataAcct, quotaFraction, gigaBytes(self.minimumSpace))
self.nodeUsage = {}
self.availableRSEs = set()
self.outOfSpaceNodes = set()
def __str__(self):
"""
Write out useful information for this object
:return: a stringified dictionary
"""
res = {'dataAcct': self.dataAcct, 'quotaFraction': self.quotaFraction,
'minimumSpace': self.minimumSpace}
return str(res)
def getNodeUsage(self):
"""
Return a dictionary of RSEs and a few storage statistics
"""
return self.nodeUsage
def getAvailableRSEs(self):
"""
        Return a set of RSE/PNNs that still have enough space available to receive data
"""
return self.availableRSEs
def getOutOfSpaceRSEs(self):
"""
        Return a set of out-of-space RSE/PNNs
"""
return self.outOfSpaceNodes
def fetchStorageQuota(self, dataSvcObj):
"""
Fetch the storage quota/limit for a given Rucio account.
:param dataSvcObj: object instance for the Rucio data service
:return: create an instance cache structure to keep track of quota
and available storage. The structure is as follows:
{"pnn_name": {"quota": quota in bytes for the rucio account,
"bytes_limit": total space for the account/group,
"bytes": amount of bytes currently used/archived,
"bytes_remaining": space remaining for the acct/group,
"quota_avail": a fraction of the quota that we will use}
"""
self.nodeUsage.clear()
response = dataSvcObj.getAccountLimits(self.dataAcct)
for rse, quota in viewitems(response):
if rse.endswith("_Tape") or rse.endswith("_Export"):
continue
self.nodeUsage.setdefault(rse, {})
self.nodeUsage[rse] = dict(quota=int(quota),
bytes_limit=int(quota),
bytes=0,
bytes_remaining=int(quota), # FIXME: always 0
quota_avail=0)
self.logger.info("Storage quota filled from Rucio")
def fetchStorageUsage(self, dataSvcObj):
"""
Fetch the storage usage from Rucio, which will then
be used as part of the data placement mechanism.
Also calculate the available quota - given the configurable quota
fraction - and mark RSEs with less than 1TB available as NOT usable.
:param dataSvcObj: object instance for the data service
Keys definition is:
* quota: the Rucio account limit
* bytes_limit: the account quota from Rucio
* bytes: data volume placed by Rucio
* bytes_remaining: storage available for our account/group
* quota_avail: space left (in bytes) that we can use for data placement
:return: update our cache in place with up-to-date values, in the format of:
{"pnn_name": {"bytes_limit": total space for the account/group,
"bytes": amount of bytes currently used/archived,
"bytes_remaining": space remaining for the acct/group}
"""
self.logger.info("Using Rucio for storage usage, with acct: %s", self.dataAcct)
for item in dataSvcObj.getAccountUsage(self.dataAcct):
if item['rse'] not in self.nodeUsage:
self.logger.warning("Rucio RSE: %s has data usage but no quota available.", item['rse'])
continue
# bytes_limit is always 0, so skip it and use whatever came from the limits call
# bytes_remaining is always negative, so calculate it based on the limits
quota = self.nodeUsage[item['rse']]['quota']
self.nodeUsage[item['rse']].update({'bytes': item['bytes'],
'bytes_remaining': quota - item['bytes']})
def evaluateQuotaExceeded(self):
"""
Goes through every single site, their quota and their remaining
storage; and mark those with less than X TB available (1TB at the
moment) as not eligible to receive data
:return: updates instance structures in place
"""
self.availableRSEs.clear()
self.outOfSpaceNodes.clear()
# given a configurable sub-fraction of our quota, recalculate how much storage is left
for rse, info in viewitems(self.nodeUsage):
quotaAvail = info['quota'] * self.quotaFraction
info['quota_avail'] = min(quotaAvail, info['bytes_remaining'])
if info['quota_avail'] < self.minimumSpace:
self.outOfSpaceNodes.add(rse)
else:
self.availableRSEs.add(rse)
self.logger.info("Currently %d nodes are out of space.", len(self.outOfSpaceNodes))
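    # --- Illustrative sketch, not part of the original WMCore module ---
    # Hypothetical numbers, assuming quotaFraction=0.8 and minimumThreshold=1TB:
    #
    #     quota = 10 * (1000 ** 4)          # 10 TB Rucio account limit
    #     used = int(9.5 * (1000 ** 4))     # 9.5 TB already placed
    #     self.nodeUsage["T2_DE_DESY"] = {"quota": quota, "bytes_limit": quota,
    #                                     "bytes": used,
    #                                     "bytes_remaining": quota - used,
    #                                     "quota_avail": 0}
    #     # evaluateQuotaExceeded(): quota_avail = min(10TB * 0.8, 0.5TB) = 0.5TB,
    #     # which is below the 1TB threshold, so "T2_DE_DESY" goes into
    #     # self.outOfSpaceNodes instead of self.availableRSEs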
def printQuotaSummary(self):
"""
Print a summary of the current quotas, space usage and space available
"""
self.logger.info("Summary of the current quotas in Terabytes:")
for node in sorted(self.nodeUsage.keys()):
msg = " %s:\t\tbytes_limit: %.2f, bytes_used: %.2f, bytes_remaining: %.2f, "
msg += "quota: %.2f, quota_avail: %.2f"
self.logger.info(msg, node, teraBytes(self.nodeUsage[node]['bytes_limit']),
teraBytes(self.nodeUsage[node]['bytes']),
teraBytes(self.nodeUsage[node]['bytes_remaining']),
teraBytes(self.nodeUsage[node]['quota']),
teraBytes(self.nodeUsage[node]['quota_avail']))
self.logger.info("List of RSE's out of quota: %s", self.outOfSpaceNodes)
def updateNodeUsage(self, node, dataSize):
"""
Provided a RSE/PNN name and the data size, in bytes, update the node
storage usage by subtracting it from the current available quota.
If it gets a list of nodes, the same dataSize is accounted for all
of them.
:param node: string with the PNN/RSE
:param dataSize: integer with the amount of bytes allocated
:return: nothing. updates nodeUsage cache
"""
if isinstance(node, (newstr, bytes)):
node = [node]
if not isinstance(dataSize, int):
self.logger.error("dataSize needs to be integer, not '%s'!", type(dataSize))
for rse in node:
self.nodeUsage[rse]['quota_avail'] -= dataSize | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/MicroService/MSTransferor/DataStructs/RSEQuotas.py | 0.78572 | 0.421552 | RSEQuotas.py | pypi |
from WMCore.WMSpec.ConfigSectionTree import ConfigSectionTree, TreeHelper
from WMCore.WMSpec.Steps.StepFactory import getStepTypeHelper
class WMStepHelper(TreeHelper):
"""
_WMStepHelper_
Utils, methods and functions for manipulating the data in a WMStep
"""
def __init__(self, stepInstance):
TreeHelper.__init__(self, stepInstance)
def name(self):
return self.data._internal_name
def setStepType(self, stepType):
"""
_setStepType_
Set the type of the step.
"""
self.data.stepType = stepType
def stepType(self):
"""
_stepType_
Retrieve the step type.
"""
return self.data.stepType
def getNumberOfCores(self):
"""
_getNumberOfCores_
Return the number of cores for the step in question
"""
try:
return int(self.data.application.multicore.numberOfCores)
except Exception:
return 1
def getNumberOfStreams(self):
"""
_getNumberOfStreams_
Return the number of event streams for the step in question
"""
try:
return int(self.data.application.multicore.eventStreams)
except Exception:
return 0
def getGPURequired(self):
"""
Return whether GPU is required or not for this step object
"""
if hasattr(self.data.application, "gpu"):
if hasattr(self.data.application.gpu, "gpuRequired"):
return self.data.application.gpu.gpuRequired
return None
def getGPURequirements(self):
"""
Return the GPU requirements for this step object
"""
if hasattr(self.data.application, "gpu"):
if hasattr(self.data.application.gpu, "gpuRequirements"):
return self.data.application.gpu.gpuRequirements
return None
def addStep(self, stepName):
"""
_addStep_
Add a new step with the name provided to this step as a child
"""
node = WMStepHelper(WMStep(stepName))
self.addNode(node)
return node
def addTopStep(self, stepName):
"""
_addTopStep_
Add a new step with the name provided to this step as a child. This
will be the first top step of all the children.
"""
node = WMStepHelper(WMStep(stepName))
self.addTopNode(node)
return node
def getStep(self, stepName):
"""
_getStep_
Retrieve the named step and wrap it in a helper
"""
node = self.getNode(stepName)
        if node is None:
return None
return WMStepHelper(node)
def setUserDN(self, userDN):
"""
_setUserDN_
Set the user DN
"""
self.data.userDN = userDN
def setAsyncDest(self, asyncDest):
"""
_setAsyncDest_
Set the async. destination
"""
self.data.asyncDest = asyncDest
def setUserRoleAndGroup(self, owner_vogroup, owner_vorole):
"""
_setUserRoleAndGroup_
Set user group and role.
"""
self.data.owner_vogroup = owner_vogroup
self.data.owner_vorole = owner_vorole
def getTypeHelper(self):
"""
_getTypeHelper_
Get a step type specific helper for this object using the StepFactory
"""
return getStepTypeHelper(self.data)
def addOverride(self, override, overrideValue):
"""
_addOverride_
Add overrides for use in step executors
"""
setattr(self.data.override, override, overrideValue)
return
def getOverrides(self):
"""
_getOverrides_
Get overrides for use in executors
"""
return self.data.override.dictionary_()
def getOutputModule(self, moduleName):
"""
_getOutputModule_
Get an output module from the step
        Return None if non-existent
"""
if hasattr(self.data.output, 'modules'):
if hasattr(self.data.output.modules, moduleName):
return getattr(self.data.output.modules, moduleName)
return None
def setIgnoredOutputModules(self, moduleList):
"""
_setIgnoredOutputModules_
Set a list of output modules to be ignored,
only CMSSW steps will use this
"""
self.data.output.ignoredModules = moduleList
return
def setNewStageoutOverride(self, newValue):
"""
A toggle for steps to use old or new stageout code
"""
self.data.newStageout = newValue
def getNewStageoutOverride(self):
"""
A toggle for steps to use old or new stageout code
"""
if hasattr(self.data, 'newStageout'):
return self.data.newStageout
else:
return False
def getIgnoredOutputModules(self):
"""
        _getIgnoredOutputModules_
Get a list of output modules to be ignored,
if the attribute is not set then return an empty list
"""
if hasattr(self.data.output, 'ignoredModules'):
return self.data.output.ignoredModules
return []
def getUserSandboxes(self):
if hasattr(self.data, 'user'):
if hasattr(self.data.user, 'inputSandboxes'):
return self.data.user.inputSandboxes
return []
def getUserFiles(self):
if hasattr(self.data, 'user'):
if hasattr(self.data.user, 'userFiles'):
return self.data.user.userFiles
return []
def setErrorDestinationStep(self, stepName):
"""
_setErrorDestinationStep_
In case of error, give the name of the step that
the execute process should go to.
"""
self.data.errorDestinationStep = stepName
return
def getErrorDestinationStep(self):
"""
_getErrorDestinationStep_
In case of error, get the step that should be
next in the process
"""
return getattr(self.data, 'errorDestinationStep', None)
def getConfigInfo(self):
"""
_getConfigInfo_
Get information about the config cache location
"""
cacheUrl = getattr(self.data.application.configuration, 'configCacheUrl', None)
cacheDb = getattr(self.data.application.configuration, 'cacheName', None)
configId = getattr(self.data.application.configuration, 'configId', None)
return cacheUrl, cacheDb, configId
def listAnalysisFiles(self):
"""
_listAnalysisFiles_
retrieve list of output module names
"""
if hasattr(self.data.output, "analysisFiles"):
return list(self.data.output.analysisFiles.dictionary_())
return []
def getAnalysisFile(self, name):
"""
_getAnalysisFile_
retrieve the data structure for an analysis file by name
None if not found
"""
return getattr(self.data.output.analysisFiles, name, None)
def getConfigCacheID(self):
"""
_getConfigCacheID_
If we have a configCacheID return it, otherwise return None
"""
return getattr(self.data.application.configuration, 'retrieveConfigUrl', None)
class WMStep(ConfigSectionTree):
"""
_WMStep_
Container for an executable unit within a Task
"""
def __init__(self, name):
ConfigSectionTree.__init__(self, name)
self.objectType = self.__class__.__name__
self.stepType = None
self.section_("application")
self.application.section_("controls")
self.application.section_("configuration")
self.section_("input")
self.input.section_("links")
self.section_("output")
self.section_("sandbox")
self.section_("emulator")
self.section_("override")
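# --- Illustrative sketch, not part of the original WMCore module ---
# Hypothetical use of the classes above to build a small two-step tree:
#
#     cmsRun = WMStepHelper(WMStep("cmsRun1"))
#     cmsRun.setStepType("CMSSW")
#     stageOut = cmsRun.addStep("stageOut1")
#     stageOut.setStepType("StageOut")
#     cmsRun.getStep("stageOut1").stepType()   # -> 'StageOut'
#     cmsRun.getNumberOfCores()                # -> 1 (multicore is not configured)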
def makeWMStep(stepName):
"""
_makeWMStep_
    Convenience method, instantiate a new WMStep with the name
provided and wrap it in a helper
"""
return WMStepHelper(WMStep(stepName)) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/WMStep.py | 0.68658 | 0.355691 | WMStep.py | pypi |
from builtins import str, bytes, object
from WMCore.Configuration import ConfigSection
def nodeName(node):
"""
_nodeName_
Util for extracting node name
"""
return node._internal_name
def nodeParent(node):
"""
_nodeParent_
Util for extracting Node Parent reference
"""
return node._internal_parent_ref
def listNodes(topNode):
"""
_listNodes_
Util to return a list of all node names in a NodeData structure
returns them in execution order
"""
result = []
result.append(nodeName(topNode))
for child in topNode.tree.childNames:
result.extend(listNodes(getattr(topNode.tree.children, child)))
return result
def listChildNodes(topNode):
"""
_listChildNodes_
ListNodes but without including the top node
"""
result = []
for child in topNode.tree.childNames:
result.extend(listNodes(getattr(topNode.tree.children, child)))
return result
def listFirstGenChildNodes(topNode):
"""
_listFirstGenChildNodes_
Return a list of the first generator child nodes.
"""
# no real need to sort it, but we better have the same order between Py2/Py3
return sorted(list(topNode.tree.childNames))
def nodeMap(node):
"""
_nodeMap_
Generate a map of node name to node data instance
Note: This will *not* preserve the execution order of the child nodes
and should not be used to traverse and operate on nodes where order matters
"""
result = {}
result[nodeName(node)] = node
for child in node.tree.childNames:
result.update(nodeMap(getattr(node.tree.children, child)))
return result
def findTopNode(node):
"""
_findTopNode_
Find the top node of a tree of nodes given an arbitrary node in the tree
Checks for non None parent, will also stop if the internal_treetop flag
is set for the parent
"""
parent = nodeParent(node)
    if parent is None:
return node
if getattr(node, "_internal_treetop", False):
return node
if getattr(parent, "_internal_treetop", False):
return parent
return findTopNode(nodeParent(node))
def allNodeNames(node):
"""
_allNodeNames_
Find the top node in the tree and then get a list of all known names
"""
topNode = findTopNode(node)
return listNodes(topNode)
def addNode(currentNode, newNode):
"""
_addNode_
Add a child node to the current node provided
"""
newName = nodeName(newNode)
allNames = allNodeNames(currentNode)
if newName in allNames:
msg = "Duplicate Node Name %s already exists in tree\n" % newName
msg += "%s\n" % allNames
raise RuntimeError(msg)
setattr(currentNode.tree.children, newName, newNode)
currentNode.tree.childNames.append(newName)
newNode.tree.parent = nodeName(currentNode)
return
def addTopNode(currentNode, newNode):
"""
_addTopNode_
Add a child node to the current node provided but insert it
at the head of the childNames list.
"""
newName = nodeName(newNode)
allNames = allNodeNames(currentNode)
if newName in allNames:
msg = "Duplicate Node Name %s already exists in tree\n" % newName
msg += "%s\n" % allNames
raise RuntimeError(msg)
setattr(currentNode.tree.children, newName, newNode)
currentNode.tree.childNames.insert(0, newName)
newNode.tree.parent = nodeName(currentNode)
return
def deleteNode(topNode, childName):
"""
_deleteNode_
Given a node within the tree, delete the child
with the given name if it exists
"""
if hasattr(topNode.tree.children, childName):
delattr(topNode.tree.children, childName)
topNode.tree.childNames.remove(childName)
def getNode(node, nodeNameToGet):
"""
_getNode_
Given a node within the tree, find the node with the name provided &
return it.
returns None if not found
"""
topNode = findTopNode(node)
mapping = nodeMap(topNode)
return mapping.get(nodeNameToGet, None)
def findTop(node):
"""
_findTop_
Ignoring tree structure, find the top node that contains the node
provided.
Will work for any ConfigSection, not limited to ConfigSectionTree
"""
    if node._internal_parent_ref is None:
return node
return findTop(node._internal_parent_ref)
def nodeIterator(node):
"""
_nodeIterator_
Generator function that delivers all nodes in order
"""
for i in listNodes(node):
yield getNode(node, i)
def nodeChildIterator(node):
"""
    _nodeChildIterator_
iterate over all nodes in order, except for the top node passed to this method
"""
for i in listChildNodes(node):
yield getNode(node, i)
def firstGenNodeChildIterator(node):
"""
_firstGenNodeChildIterator_
Iterator over all the first generation child nodes.
"""
for i in listFirstGenChildNodes(node):
yield getNode(node, i)
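# --- Illustrative sketch, not part of the original WMCore module ---
# Hypothetical three-node tree and what the traversal helpers above return,
# assuming ConfigSectionTree is the node class defined in this module:
#
#     top = ConfigSectionTree("Task1")
#     child = ConfigSectionTree("Task2")
#     grandChild = ConfigSectionTree("Task3")
#     addNode(top, child)
#     addNode(child, grandChild)
#     listNodes(top)               # -> ['Task1', 'Task2', 'Task3'] (execution order)
#     listChildNodes(top)          # -> ['Task2', 'Task3']
#     listFirstGenChildNodes(top)  # -> ['Task2']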
def format(value):
"""
_format_
format a value as python
keep parameters simple, trust python...
"""
if isinstance(value, (str, bytes)):
value = "\'%s\'" % value
return str(value)
def formatNative(value):
"""
_formatNative_
Like the format function, but allowing passing of ints, floats, etc.
"""
if isinstance(value, int):
return value
if isinstance(value, float):
return value
if isinstance(value, list):
return value
if isinstance(value, dict):
return value
else:
return format(value)
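# Quick illustration of the two formatters above (example values only):
#   format("abc")        -> "'abc'"   quoted string, ready to be written out as python
#   format(5)            -> "5"       always returns a string
#   formatNative(5)      -> 5         ints, floats, lists and dicts pass through untouched
#   formatNative("abc")  -> "'abc'"   everything else falls back to format()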
class TreeHelper(object):
"""
_TreeHelper_
    Convenience wrapper for a ConfigSectionTree that provides
all the util methods as a wrapper class to avoid method name
and attribute collisions in the underlying ConfigSection
"""
def __init__(self, cfgSectTree):
self.data = cfgSectTree
def name(self):
"""get name of this node"""
return nodeName(self.data)
def setTopOfTree(self):
"""
flag this node as the top of the tree
"""
self.data._internal_treetop = True
def isTopOfTree(self):
"""
_isTopOfTree_
Determine if this section is the top of the tree.
"""
return self.data._internal_treetop
def listNodes(self):
"""list this node and all subnodes"""
return listNodes(self.data)
def getNode(self, nodeNameToGet):
"""get a node by name from this tree"""
return getNode(self.data, nodeNameToGet)
def getNodeWithHelper(self, nodeNameToGet):
"""get a node wrapped in a TreeHelper instance"""
return TreeHelper(getNode(self.data, nodeNameToGet))
def addNode(self, newNode):
"""
add a new Node, newNode can be a ConfigSectionTree or a TreeHelper
"""
if isinstance(newNode, TreeHelper):
return addNode(self.data, newNode.data)
return addNode(self.data, newNode)
def addTopNode(self, newNode):
"""
_addTopNode_
Add a child node to the current node provided but insert it
at the head of the childNames list.
"""
if isinstance(newNode, TreeHelper):
return addTopNode(self.data, newNode.data)
return addTopNode(self.data, newNode)
def deleteNode(self, nodeName):
"""
        _deleteNode_
        Delete a child node given its name;
        if it doesn't exist, do nothing.
"""
deleteNode(self.data, nodeName)
return
def allNodeNames(self):
"""get list of all known node names in the tree containing this node"""
return allNodeNames(self.data)
def findTopNode(self):
"""get ref to the top node in the tree containing this node"""
return findTopNode(self.data)
def getTopConfigSection(self):
"""
_getTopConfigSection_
Ignore tree structure & fetch the absolute top of the pile
ConfigSection containing this node
"""
return findTop(self.data)
def nodeIterator(self):
"""
generator for processing all subnodes in execution order
"""
for i in listNodes(self.data):
yield getNode(self.data, i)
def nodeChildIterator(self):
"""
        generator for processing all subnodes in execution order, excluding this node
"""
for i in listChildNodes(self.data):
yield getNode(self.data, i)
def firstGenNodeChildIterator(self):
"""
        _firstGenNodeChildIterator_
Iterate over all the first generation child nodes.
"""
for i in listFirstGenChildNodes(self.data):
yield getNode(self.data, i)
def pythoniseDict(self, **options):
"""
        Convert this section into a dict keyed by python-style attribute
        paths, with the corresponding formatted values in value position.
"""
prefix = options.get('prefix', None)
sections = options.get('sections', False)
if prefix != None:
myName = "%s.%s" % (prefix, self.data._internal_name)
else:
myName = self.data._internal_name
result = {}
for attr in self.data._internal_settings:
if attr in self.data._internal_children:
if sections:
result["%s.section_(\'%s\')" % (myName, attr)] = '_Section_'
result.update(TreeHelper(getattr(self.data, attr)).pythoniseDict(prefix = myName))
continue
            # This is potentially dangerous because formatNative passes lists and dicts through as-is.
result["%s.%s" %(myName, attr)] = formatNative(getattr(self.data, attr))
return result
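    # For illustration, pythoniseDict() above returns a flat mapping such as
    #   {"taskA.param": 5, "taskA.child.flag": True}
    # for a section named "taskA" with a child section "child"; the names here
    # are hypothetical. With sections=True it additionally emits entries like
    #   "taskA.section_('child')" -> '_Section_'.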
def addValue(self, value):
"""
_addValue_
        Adds arbitrary values passed in as a dictionary; dotted keys address
        nested sections and multiple key/value pairs may be given at once.
"""
if not isinstance(value, dict):
raise Exception("TreeHelper.addValue passed a value that was not a dictionary")
for key in value:
splitList = key.split('.')
setResult = value[key]
if len(splitList) == 1:
#Then there is only one level, and we put it here
setattr(self.data, key, setResult)
else:
if splitList[0] in self.data.listSections_():
#If the section exists, go to it directly
helper = TreeHelper(getattr(self.data, splitList[0]))
                    helper.addValue({".".join(splitList[1:]): setResult})
else:
#If the section doesn't exist, create it
self.data.section_(splitList[0])
helper = TreeHelper(getattr(self.data, splitList[0]))
helper.addValue({"".join(splitList[1:]):setResult})
return
class ConfigSectionTree(ConfigSection):
"""
_ConfigSectionTree_
Node Tree structure that can be embedded into a ConfigSection
structure
"""
def __init__(self, name):
ConfigSection.__init__(self, name)
self._internal_treetop = False
self.section_("tree")
self.tree.section_("children")
self.tree.childNames = []
self.tree.parent = None | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/ConfigSectionTree.py | 0.762513 | 0.50116 | ConfigSectionTree.py | pypi |
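# A minimal usage sketch for the ConfigSectionTree/TreeHelper API above; the
# node names "TaskA" and "TaskB" are hypothetical and used for illustration only.
def _exampleTreeUsage():
    top = ConfigSectionTree("TaskA")
    child = ConfigSectionTree("TaskB")
    helper = TreeHelper(top)
    helper.setTopOfTree()
    # addNode() wires up tree.children, tree.childNames and the child's parent name
    helper.addNode(child)
    # listNodes() returns the names of this node and all of its descendants
    return helper.listNodes()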
from builtins import object
from future.utils import viewvalues
import os
import logging
from WMCore.WMSpec.ConfigSectionTree import nodeName
class File(object):
"""
_File_
Class representing a file added to a directory.
Contains a source for the file and a name within
the directory
"""
def __init__(self, directory, name, source):
self.directory = directory
self.name = name
self.source = source
def path(self):
"""
_path_
Get name of this file within directory structure
"""
return "%s/%s" % (self.directory.path(), self.name)
def fetch(self, targetDir):
"""
_fetch_
Get the source and put it in the target dir.
Note: for now this uses cp, could use other
things based on source type, eg http:// etc etc
"""
command = "/bin/cp -rf %s %s/%s" % (self.source,
targetDir,
self.name)
logging.info("fetch:%s" % command)
os.system(command)
return
class Directory(object):
"""
_Directory_
structure representing a dir, to which files can be attached
"""
def __init__(self, name):
self.name = name
self.parent = None
self.children = {}
self.files = {}
self.physicalPath = None
def addDirectory(self, name):
"""
_addDirectory_
Add a new child Directory to this.
Return reference to new Directory instance
"""
if name in self.children:
return self.children[name]
self.children[name] = Directory(name)
self.children[name].parent = self
return self.children[name]
def addFile(self, source, targetName = None):
"""
_addFile_
Add a file to this directory.
The file will be pulled in from the source specified.
targetName is the optional name of the file in this
directory. If not specified, the basename of the file
will be used
"""
target = targetName
if target == None:
target = os.path.basename(source)
if target in self.files:
msg = "File %s already exists in directory %s" % (
self.name, target)
raise RuntimeError(msg)
newFile = File(self, target, source)
self.files[target] = newFile
return
def path(self):
"""
_path_
Get name of this dir within directory structure
"""
if self.parent == None:
return self.name
return "%s/%s" % (self.parent.path(), self.name)
def create(self, targetDir):
"""
_create_
Make this directory in the targetDirectory provided,
pull in all files and then recursively create any
children
"""
newDir = "%s/%s" % (targetDir, self.name)
logging.info("create(%s)" % newDir)
if not os.path.exists(newDir):
os.makedirs(newDir)
for f in viewvalues(self.files):
f.fetch(newDir)
for child in viewvalues(self.children):
child.create(newDir)
return
def __str__(self):
result = "%s\n" % self.path()
for f in viewvalues(self.files):
result += "%s ==> %s\n" % (f.path(), f.source)
for d in viewvalues(self.children):
result += str(d)
return result
def processDir(cfgSect, parent):
"""
_processDir_
Process a ConfigSection based directory
"""
for setting in cfgSect._internal_settings:
value = getattr(cfgSect, setting)
if not isinstance(value, dict): continue
parent.addFile(value['Source'], value['Target'])
for subdir in cfgSect._internal_children:
newsubdir = parent.addDirectory(subdir)
processDir(getattr(cfgSect, subdir), newsubdir)
return
def makeDirectory(step):
"""
_makeDirectory_
Create a Directory & file structure from the step provided
"""
dirs = getattr(step.build.directories, nodeName(step))
topDir = Directory(nodeName(step))
processDir(dirs, topDir)
return topDir | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/Steps/BuildTools.py | 0.440229 | 0.160167 | BuildTools.py | pypi |
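# A minimal sketch of how the Directory/File structures above are used; the
# directory layout and the source path are illustrative assumptions.
def _exampleBuildTools():
    top = Directory("job")
    cfg = top.addDirectory("config")
    # The file is registered for copy into job/config/, keeping its basename
    cfg.addFile("/tmp/example_cfg.py")
    # __str__ lists every path together with the source it is fetched from
    print(str(top))
    # top.create("/some/scratch/dir") would create the tree and copy the files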
from builtins import object
from WMCore.FwkJobReport.Report import FwkJobReportException
class DiagnosticHandler(object):
"""
_DiagnosticHandler_
Interface definition for handlers for a specific error condition
"""
def __call__(self, errorCode, executorInstance, **args):
"""
_operator(errCode, executor)_
Override to act on a particular error, use the executorInstance
to access things like the step, logfiles, and report.
Args will be used to provide extra information such as Exception
instances etc
"""
msg = "DiagnosticHandler.__call__ not "
msg += "implemented for class %s" % self.__class__.__name__
raise NotImplementedError(msg)
def parse(self, executorInstance, jobRepXml):
"""
Add an error to report if parsing the xml fails.
"""
try:
executorInstance.report.parse(jobRepXml, executorInstance.stepName)
except FwkJobReportException as ex:
# Job report is bad, the parse already puts a 50115 in the file
msg = "Error reading XML job report file, possibly corrupt XML File:\n"
msg += "Details: %s" % str(ex)
executorInstance.report.addError(executorInstance.stepName, 50115, "BadFWJRXML", msg)
raise
class DefaultDiagnosticHandler(DiagnosticHandler):
"""
_DefaultDiagnosticHandler_
Catch-all that just adds information to the report
"""
def __call__(self, errorCode, executorInstance, **args):
pass
class Diagnostic(object):
"""
_Diagnostic_
Base class for a Diagnostic implementation specific to a step type
Also works as a bare minimum Diagnostic if overriding is not needed
"""
def __init__(self):
self.handlers = {}
self.defaultHandler = DefaultDiagnosticHandler()
def __call__(self, errCode, executor, **args):
"""
_operator(errCode, executor, args)_
Invoke the diagnostic to produce an error report
"""
handler = self.handlers.get(errCode, self.defaultHandler)
handler(errCode, executor, **args)
executor.saveReport() | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/Steps/Diagnostic.py | 0.500488 | 0.173393 | Diagnostic.py | pypi |
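# Sketch of a step-specific Diagnostic built on the classes above, following
# the pattern of the concrete diagnostics in this package; the 99109 error
# code and the messages are illustrative assumptions.
class _ExampleDiagnosticHandler(DiagnosticHandler):
    def __call__(self, errorCode, executorInstance, **args):
        # Record the failure in the report carried by the step executor
        executorInstance.report.addError(executorInstance.stepName, errorCode,
                                         "ExampleFailure", "illustrative error message")

class _ExampleDiagnostic(Diagnostic):
    def __init__(self):
        Diagnostic.__init__(self)
        # Codes without an explicit handler fall back to DefaultDiagnosticHandler
        self.handlers[99109] = _ExampleDiagnosticHandler()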
from builtins import object
import os
from WMCore.WMSpec.WMStep import WMStepHelper
from WMCore.WMSpec.ConfigSectionTree import nodeName
class CoreHelper(WMStepHelper):
"""
_CoreHelper_
Helper API for core settings
"""
def stepName(self):
"""
_stepName_
Get the name of the step
"""
return nodeName(self.data)
def addEnvironmentVariable(self, varname, setting):
"""
_addEnvironmentVariable_
add a key = value style setting to the environment for this
step
"""
setattr(self.data.environment.variables, varname, setting)
return
def addEnvironmentPath(self, pathname, setting):
"""
_addEnvironmentPath_
add a key = value1:value2:value3 environment setting to this step
"""
if getattr(self.data.environment.paths, pathname, None) == None:
setattr(self.data.environment.paths, pathname, [])
pathentry = getattr(self.data.environment.paths, pathname)
pathentry.append(setting)
return
def environment(self):
"""
_environment_
Get the environment settings for this step
"""
return self.data.environment
def setOverrideCatalog(self, overrideCatalog):
"""
_setOverrideCatalog_
set the override catalog needed at least at CERN to use production castor pools
"""
if overrideCatalog is not None:
self.data.application.overrideCatalog = overrideCatalog
def getOverrideCatalog(self):
"""
_getOverrideCatalog_
return the TFC specified in overrideCatalog.
"""
return getattr(self.data.application, "overrideCatalog", None)
def addDirectory(self, dirName):
"""
_addDirectory_
Add a subdirectory structure to the template that will be built by
the builder
"""
split = dirName.split("/")
split = [ x for x in split if x.strip() != "" ]
dirs = getattr(self.data.build.directories, self.stepName())
for subdir in split:
exists = getattr(dirs, subdir, None)
if exists == None:
dirs.section_(subdir)
dirs = getattr(dirs, subdir)
return dirs
def addFile(self, fileName, newLocation = None):
"""
_addFile_
Add a file to the job at build time. This file must be
a local filesystem file available at fileName.
An optional location within the step can be specified which
may include a path structure that gets translated into calls
to addDirectory
"""
dirs = getattr(self.data.build.directories, self.stepName())
if newLocation != None:
filename = os.path.basename(newLocation)
dirname = os.path.dirname(newLocation)
dirs = self.addDirectory(dirname)
setattr(dirs, filename, { "Source" : fileName, "Target" : filename})
else:
filename = os.path.basename(fileName)
setattr(dirs, filename, {"Target" : filename, "Source" : fileName })
return
def directoryStructure(self):
"""
_directoryStructure_
Util to retrieve the directory structure
"""
return self.data.build.directories
class Template(object):
"""
_Template_
Base interface definition for any WMStep Template
"""
def __init__(self):
pass
def __call__(self, step):
"""
_operator(step)_
Install the template on the step instance provided
"""
self.coreInstall(step)
self.install(step)
def coreInstall(self, step):
"""
_coreInstall_
Install attributes common to all steps
"""
# Environment settings to pass to the step
step.section_("environment")
step.environment.section_("variables")
step.environment.section_("paths")
# Directory structure and files to be included in the job
# beyond those that would be added by a Step Specific builder
# Step Specific subclasses can simply append to these to get files
# and dirs into the job
step.section_("build")
step.build.section_("directories")
step.build.directories.section_(nodeName(step))
def install(self, step):
"""
_install_
Override this method to install the required attributes
in the step Instance provided
"""
msg = "WMSpec.Steps.Template.install method not overridden in "
msg += "implementation: %s\n" % self.__class__.__name__
raise NotImplementedError(msg)
def helper(self, step):
"""
_helper_
Wrap the step instance in a helper class tailored to this particular
step type
"""
msg = "WMSpec.Steps.Template.helper method not overridden in "
msg += "implementation: %s\n" % self.__class__.__name__
raise NotImplementedError(msg) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/Steps/Template.py | 0.409575 | 0.213685 | Template.py | pypi |
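# Sketch of the minimal contract a concrete Template subclass fulfils; the
# "Example" step type and the extra section are illustrative assumptions, real
# templates live in WMCore.WMSpec.Steps.Templates.
class _ExampleTemplate(Template):
    def install(self, step):
        # coreInstall() has already added the environment and build sections,
        # so install() only needs to set type-specific attributes
        step.stepType = "Example"
        step.section_("exampleSettings")
    def helper(self, step):
        # Wrap the raw step data in the generic CoreHelper defined above
        return CoreHelper(step)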
from future.utils import viewitems
from WMCore.WMSpec.Steps.Template import Template
from WMCore.WMSpec.Steps.Template import CoreHelper
class LogCollectStepHelper(CoreHelper):
"""
    _LogCollectStepHelper_
    Add API calls and helper methods to the basic WMStepHelper to specialise
    for LogCollect tasks.
    This is very similar to StageOut since they have essentially the same function.
"""
def disableRetries(self):
"""
handy for testing, without the 10 minute retry loop
"""
self.data.retryCount = 1
self.data.retryDelay = 0
def addOutputDestination(self, lfn):
"""
        Adds an output location for a tarball of all logs.
        Currently a placeholder with no implementation.
"""
def cmsswSetup(self, cmsswVersion, **options):
"""
_cmsswSetup_
Provide setup details for CMSSW.
cmsswVersion - required - version of CMSSW to use
Optional:
scramCommand - defaults to scramv1
scramProject - defaults to CMSSW
scramArch - optional scram architecture, defaults to None
buildArch - optional scram build architecture, defaults to None
softwareEnvironment - setup command to bootstrap scram,defaults to None
"""
self.data.application.setup.cmsswVersion = cmsswVersion
for k, v in viewitems(options):
setattr(self.data.application.setup, k, v)
return
def getScramArch(self):
"""
_getScramArch_
Retrieve the scram architecture used for this step.
"""
return self.data.application.setup.scramArch
def getCMSSWVersion(self):
"""
_getCMSSWVersion_
Retrieve the version of the framework used for this step.
"""
return self.data.application.setup.cmsswVersion
class LogCollect(Template):
"""
_LogCollect_
Tools for creating a template LogCollect Step
"""
def install(self, step):
step.stepType = "LogCollect"
step.section_("logs")
step.logcount = 0
step.retryCount = 3
step.retryDelay = 300
step.application.section_("setup")
step.application.setup.scramCommand = "scramv1"
step.application.setup.scramProject = "CMSSW"
step.application.setup.cmsswVersion = None
step.application.setup.scramArch = None
step.application.setup.buildArch = None
step.application.setup.softwareEnvironment = None
def helper(self, step):
"""
_helper_
Wrap the WMStep provided in the CMSSW helper class that
includes the ability to add and manipulate the details
of a CMSSW workflow step
"""
return LogCollectStepHelper(step) | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/Steps/Templates/LogCollect.py | 0.746878 | 0.348811 | LogCollect.py | pypi |
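# Rough sketch of instantiating a LogCollect step from the template above; the
# step name, CMSSW version and scram arch are placeholders, and makeWMStep is
# assumed to come from WMCore.WMSpec.WMStep.
#
#   from WMCore.WMSpec.WMStep import makeWMStep
#   step = makeWMStep("logCollect1")
#   LogCollect()(step.data)                       # installs the template defaults
#   helper = LogCollect().helper(step.data)
#   helper.cmsswSetup("CMSSW_X_Y_Z", scramArch="slc7_amd64_gcc700")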
from builtins import range
import os
from WMCore.WMSpec.Steps.Diagnostic import Diagnostic, DiagnosticHandler
class Exit70318(DiagnosticHandler):
def __call__(self, errCode, executor, **args):
msg = "Failed to upload a DQM file to the GUI server."
executor.report.addError(executor.stepName, 70318, "DQMUploadFailure", msg)
class DUExceptionHandler(DiagnosticHandler):
"""
_DUExceptionHandler_
    Generic catch-all handler for the DQMUpload step: it checks the framework
    job report and makes sure the failure is recorded in the step report.
"""
def __call__(self, errCode, executor, **args):
"""
_operator()_
        Parse the framework job report (if present) and make sure the
        non-zero exit code is reflected in the step error report.
"""
jobRepXml = os.path.join(executor.step.builder.workingDir,
executor.step.output.jobReport)
if not os.path.exists(jobRepXml):
# no report => Error
msg = "No Job Report Found: %s" % jobRepXml
executor.report.addError(executor.stepName, 50115, "MissingJobReport", msg)
return
# job report XML exists, load the exception information from it
self.parse(executor, jobRepXml)
# make sure the report has the error in it
errSection = getattr(executor.report.report, "errors", None)
if errSection == None:
msg = "Job Report contains no error report, but DQMUpload exited non-zero: %s" % errCode
executor.report.addError(executor.stepName, 50116, "MissingErrorReport", msg)
else:
#check exit code in report is non zero
if executor.report.report.status == 0:
msg = "Job Report contains no error report, but DQMUpload exited non-zero: %s" % errCode
executor.report.addError(executor.stepName, 50116, "MissingErrorReport", msg)
return
class DQMUpload(Diagnostic):
def __init__(self):
Diagnostic.__init__(self)
self.handlers[70318] = Exit70318()
catchAll = DUExceptionHandler()
        for x in range(0, 255):
            if x not in self.handlers:
                self.handlers[x] = catchAll
from builtins import range
import os
from WMCore.WMSpec.Steps.Diagnostic import Diagnostic, DiagnosticHandler
class Exit60319(DiagnosticHandler):
def __call__(self, errCode, executor, **args):
msg = "Failed to copy AlcaHarvest condition file to target directory."
executor.report.addError(executor.stepName,
60319, "AlcaHarvestFailure", msg)
class AHExceptionHandler(DiagnosticHandler):
"""
_AHExceptionHandler_
    Generic catch-all handler for the AlcaHarvest step: it checks the framework
    job report and makes sure the failure is recorded in the step report.
"""
def __call__(self, errCode, executor, **args):
"""
_operator()_
        Parse the framework job report (if present) and make sure the
        non-zero exit code is reflected in the step error report.
"""
jobRepXml = os.path.join(executor.step.builder.workingDir,
executor.step.output.jobReport)
if not os.path.exists(jobRepXml):
# no report => Error
msg = "No Job Report Found: %s" % jobRepXml
executor.report.addError(executor.stepName, 50115, "MissingJobReport", msg)
return
# job report XML exists, load the exception information from it
self.parse(executor, jobRepXml)
# make sure the report has the error in it
errSection = getattr(executor.report.report, "errors", None)
if errSection == None:
msg = "Job Report contains no error report, but AlcaHarvest exited non-zero: %s" % errCode
executor.report.addError(executor.stepName, 50116, "MissingErrorReport", msg)
else:
#check exit code in report is non zero
if executor.report.report.status == 0:
msg = "Job Report contains no error report, but AlcaHarvest exited non-zero: %s" % errCode
executor.report.addError(executor.stepName, 50116, "MissingErrorReport", msg)
return
class AlcaHarvest(Diagnostic):
def __init__(self):
Diagnostic.__init__(self)
self.handlers[60319] = Exit60319()
catchAll = AHExceptionHandler()
for x in range(0, 255):
if x not in self.handlers:
                self.handlers[x] = catchAll
from __future__ import division
import json
from future.utils import viewitems
from Utils.Utilities import makeList
from WMCore.WMSpec.StdSpecs.DataProcessing import DataProcessing
from WMCore.WMSpec.WMWorkloadTools import validateArgumentsCreate
class ReRecoWorkloadFactory(DataProcessing):
"""
_ReRecoWorkloadFactory_
Stamp out ReReco workflows.
"""
def buildWorkload(self):
"""
_buildWorkload_
Build the workload given all of the input parameters. At the very least
this will create a processing task and merge tasks for all the outputs
of the processing task.
Note that there will be LogCollect tasks created for each processing
task and Cleanup tasks created for each merge task.
"""
(self.inputPrimaryDataset, self.inputProcessedDataset,
self.inputDataTier) = self.inputDataset[1:].split("/")
workload = self.createWorkload()
workload.setDashboardActivity("reprocessing")
workload.setWorkQueueSplitPolicy("Block", self.procJobSplitAlgo,
self.procJobSplitArgs,
OpenRunningTimeout=self.openRunningTimeout)
procTask = workload.newTask("DataProcessing")
cmsswStepType = "CMSSW"
taskType = "Processing"
forceUnmerged = False
if self.transientModules:
# If we have at least one output module not being merged,
# we must force all the processing task to be unmerged
forceUnmerged = True
outputMods = self.setupProcessingTask(procTask, taskType,
self.inputDataset,
couchDBName=self.couchDBName,
configCacheUrl=self.configCacheUrl,
forceUnmerged=forceUnmerged,
configDoc=self.configCacheID,
splitAlgo=self.procJobSplitAlgo,
splitArgs=self.procJobSplitArgs,
stepType=cmsswStepType)
self.addLogCollectTask(procTask)
# no real need to sort it, but we better have the same order between Py2/Py3
for outputModuleName in sorted(list(outputMods)):
# Only merge the desired outputs
if outputModuleName not in self.transientModules:
self.addMergeTask(procTask, self.procJobSplitAlgo, outputModuleName)
else:
self.addCleanupTask(procTask, outputModuleName)
self.addSkims(workload)
# setting the parameters which need to be set for all the tasks
# sets acquisitionEra, processingVersion, processingString
workload.setTaskPropertiesFromWorkload()
# set the LFN bases (normally done by request manager)
# also pass runNumber (workload evaluates it)
workload.setLFNBase(self.mergedLFNBase, self.unmergedLFNBase,
runNumber=self.runNumber)
return workload
def addSkims(self, workload):
"""
_addSkims_
Add skims to the standard dataprocessing workload that was given.
Note that there will be LogCollect tasks created for each processing
task and Cleanup tasks created for each merge task.
"""
skimmableTasks = {}
procTask = workload.getTopLevelTask()[0]
for skimmableTask in procTask.childTaskIterator():
if skimmableTask.taskType() == "Merge":
skimmableTasks[skimmableTask.data.input.outputModule] = skimmableTask
# Now add the output modules that are not merged but may be skimmed
for outputModule in self.transientModules:
skimmableTasks[outputModule] = procTask
for skimConfig in self.skimConfigs:
skimmableTask = skimmableTasks[skimConfig["SkimInput"]]
skimTask = skimmableTask.addTask(skimConfig["SkimName"])
parentCmsswStep = skimmableTask.getStep("cmsRun1")
skimSizePerEvent = skimConfig["SizePerEvent"]
skimTimePerEvent = skimConfig["TimePerEvent"]
skimMemory = skimConfig["Memory"]
# Check that the splitting agrees, if the parent is event based then we must do WMBSMergeBySize
# With reasonable defaults
skimJobSplitAlgo = skimConfig["SkimJobSplitAlgo"]
skimJobSplitArgs = skimConfig["SkimJobSplitArgs"]
if skimmableTask.jobSplittingAlgorithm == "EventBased":
skimJobSplitAlgo = "WMBSMergeBySize"
skimJobSplitArgs = {"max_merge_size": self.maxMergeSize,
"min_merge_size": self.minMergeSize,
"max_merge_events": self.maxMergeEvents,
"max_wait_time": self.maxWaitTime}
# Define the input module
inputModule = "Merged"
if skimConfig["SkimInput"] in self.transientModules:
inputModule = skimConfig["SkimInput"]
outputMods = self.setupProcessingTask(skimTask, "Skim",
inputStep=parentCmsswStep,
inputModule=inputModule,
couchDBName=self.couchDBName,
configCacheUrl=self.configCacheUrl,
configDoc=skimConfig["ConfigCacheID"],
splitAlgo=skimJobSplitAlgo,
splitArgs=skimJobSplitArgs,
timePerEvent=skimTimePerEvent,
sizePerEvent=skimSizePerEvent,
memoryReq=skimMemory)
self.addLogCollectTask(skimTask, taskName="%sLogCollect" % skimConfig["SkimName"])
for outputModuleName in outputMods:
self.addMergeTask(skimTask, skimJobSplitAlgo, outputModuleName)
return
def __call__(self, workloadName, arguments):
"""
_call_
Create a ReReco workload with the given parameters.
"""
DataProcessing.__call__(self, workloadName, arguments)
# Arrange the skims in a skimConfig object (i.e. a list of skim configurations)
self.skimConfigs = []
skimIndex = 1
while "SkimName%s" % skimIndex in arguments:
skimConfig = {}
skimConfig["SkimName"] = arguments["SkimName%s" % skimIndex]
skimConfig["SkimInput"] = arguments["SkimInput%s" % skimIndex]
skimConfig["ConfigCacheID"] = arguments["Skim%sConfigCacheID" % skimIndex]
skimConfig["TimePerEvent"] = float(arguments.get("SkimTimePerEvent%s" % skimIndex, self.timePerEvent))
skimConfig["SizePerEvent"] = float(arguments.get("SkimSizePerEvent%s" % skimIndex, self.sizePerEvent))
skimConfig["Memory"] = float(arguments.get("SkimMemory%s" % skimIndex, self.memory))
skimConfig["SkimJobSplitAlgo"] = arguments.get("SkimSplittingAlgo%s" % skimIndex, "FileBased")
skimConfig["SkimJobSplitArgs"] = {"include_parents": True}
if skimConfig["SkimJobSplitAlgo"] == "FileBased":
skimConfig["SkimJobSplitArgs"]["files_per_job"] = int(arguments.get("SkimFilesPerJob%s" % skimIndex, 1))
elif skimConfig["SkimJobSplitAlgo"] in ["EventBased", "EventAwareLumiBased"]:
standardSkim = int((8.0 * 3600.0) / skimConfig["TimePerEvent"])
skimConfig["SkimJobSplitArgs"]["events_per_job"] = int(arguments.get("SkimEventsPerJob%s" % skimIndex, standardSkim))
if skimConfig["SkimJobSplitAlgo"] == "EventAwareLumiBased":
skimConfig["SkimJobSplitAlgo"]["job_time_limit"] = 48 * 3600 # 2 days
elif skimConfig["SkimJobSplitAlgo"] == "LumiBased":
skimConfig["SkimJobSplitArgs"]["lumis_per_job"] = int(arguments.get("SkimLumisPerJob%s" % skimIndex, 8))
self.skimConfigs.append(skimConfig)
skimIndex += 1
return self.buildWorkload()
@staticmethod
def getWorkloadCreateArgs():
baseArgs = DataProcessing.getWorkloadCreateArgs()
specArgs = {"RequestType": {"default": "ReReco", "optional": False},
"TransientOutputModules": {"default": [], "type": makeList,
"attr": "transientModules", "null": False}
}
baseArgs.update(specArgs)
DataProcessing.setDefaultArgumentsProperty(baseArgs)
return baseArgs
@staticmethod
def getSkimArguments():
"""
_getSkimArguments_
        Skim arguments are repeated once per configured skim, so they are
        defined here in a generic (#N) form. This method follows the same
        definition style as getWorkloadCreateArgs in StdBase.
"""
skimArgs = {
"SkimName#N": {"default": None, "type": str,
"optional": False, "validate": None,
"null": False},
"SkimInput#N": {"default": None, "type": str,
"optional": False, "validate": None,
"null": False},
"Skim#NConfigCacheID": {"default": None, "type": str,
"optional": False, "validate": None,
"null": False},
"SkimTimePerEvent#N": {"default": None, "type": float,
"optional": True, "validate": lambda x: x > 0,
"null": False},
"SkimSizePerEvent#N": {"default": None, "type": float,
"optional": True, "validate": lambda x: x > 0,
"null": False},
"SkimMemory#N": {"default": None, "type": float,
"optional": True, "validate": lambda x: x > 0,
"null": False},
"SkimSplittingAlgo#N": {"default": None, "type": str,
"optional": True, "validate": None,
"null": False},
"SkimEventsPerJob#N": {"default": None, "type": int,
"optional": True, "validate": lambda x: x > 0,
"null": False},
"SkimLumisPerJob#N": {"default": 8, "type": int,
"optional": True, "validate": lambda x: x > 0,
"null": False},
"SkimFilesPerJob#N": {"default": 1, "type": int,
"optional": True, "validate": lambda x: x > 0,
"null": False}}
return skimArgs
def validateSchema(self, schema):
"""
_validateSchema_
Check for required fields, and some skim facts
"""
DataProcessing.validateSchema(self, schema)
mainOutputModules = list(self.validateConfigCacheExists(configID=schema["ConfigCacheID"],
configCacheUrl=schema['ConfigCacheUrl'],
couchDBName=schema["CouchDBName"],
getOutputModules=True))
# Skim facts have to be validated outside the usual master validation
skimSchema = {k: v for (k, v) in viewitems(schema) if k.startswith("Skim")}
skimArguments = self.getSkimArguments()
skimIndex = 1
skimInputs = set()
while "SkimName%s" % skimIndex in schema:
instanceArguments = {}
for argument in skimArguments:
realArg = argument.replace("#N", str(skimIndex))
instanceArguments[realArg] = skimArguments[argument]
try:
validateArgumentsCreate(skimSchema, instanceArguments)
# Validate GPU-related spec parameters
DataProcessing.validateGPUSettings(schema)
except Exception as ex:
self.raiseValidationException(str(ex))
self.validateConfigCacheExists(configID=schema["Skim%sConfigCacheID" % skimIndex],
configCacheUrl=schema['ConfigCacheUrl'],
couchDBName=schema["CouchDBName"],
getOutputModules=False)
if schema["SkimInput%s" % skimIndex] not in mainOutputModules:
error = "Processing config does not have the following output module: %s." % schema[
"SkimInput%s" % skimIndex]
self.raiseValidationException(msg=error)
skimInputs.add(schema["SkimInput%s" % skimIndex])
skimIndex += 1
# Validate that the transient output modules are used in a skim task
if "TransientOutputModules" in schema:
diffSet = set(schema["TransientOutputModules"]) - skimInputs
if diffSet:
self.raiseValidationException(
msg="A transient output module was specified but no skim was defined for it") | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/StdSpecs/ReReco.py | 0.662796 | 0.195057 | ReReco.py | pypi |
from Utils.Utilities import makeList, makeNonEmptyList
from WMCore.Lexicon import dataset, block, physicsgroup, cmsname
from WMCore.WMSpec.StdSpecs.StdBase import StdBase
class StoreResultsWorkloadFactory(StdBase):
"""
_StoreResultsWorkloadFactory_
Stamp out StoreResults workloads.
"""
def __call__(self, workloadName, arguments):
"""
_call_
Create a StoreResults workload with the given parameters.
"""
# first of all, we update the merged LFN based on the physics group
arguments['MergedLFNBase'] += "/" + arguments['PhysicsGroup'].lower()
StdBase.__call__(self, workloadName, arguments)
(inputPrimaryDataset, inputProcessedDataset, inputDataTier) = self.inputDataset[1:].split("/")
workload = self.createWorkload()
mergeTask = workload.newTask("StoreResults")
self.addRuntimeMonitors(mergeTask)
mergeTaskCmssw = mergeTask.makeStep("cmsRun1")
mergeTaskCmssw.setStepType("CMSSW")
mergeTaskStageOut = mergeTaskCmssw.addStep("stageOut1")
mergeTaskStageOut.setStepType("StageOut")
mergeTaskLogArch = mergeTaskCmssw.addStep("logArch1")
mergeTaskLogArch.setStepType("LogArchive")
self.addLogCollectTask(mergeTask, taskName="StoreResultsLogCollect")
mergeTask.setTaskType("Merge")
mergeTask.applyTemplates()
mergeTask.addInputDataset(name=self.inputDataset,
primary=inputPrimaryDataset,
processed=inputProcessedDataset,
tier=inputDataTier,
dbsurl=self.dbsUrl,
block_blacklist=self.blockBlacklist,
block_whitelist=self.blockWhitelist,
run_blacklist=self.runBlacklist,
run_whitelist=self.runWhitelist)
splitAlgo = "ParentlessMergeBySize"
mergeTask.setSplittingAlgorithm(splitAlgo,
max_merge_size=self.maxMergeSize,
min_merge_size=self.minMergeSize,
max_merge_events=self.maxMergeEvents)
mergeTaskCmsswHelper = mergeTaskCmssw.getTypeHelper()
mergeTaskCmsswHelper.cmsswSetup(self.frameworkVersion, softwareEnvironment="",
scramArch=self.scramArch)
mergeTaskCmsswHelper.setGlobalTag(self.globalTag)
mergeTaskCmsswHelper.setSkipBadFiles(True)
mergeTaskCmsswHelper.setDataProcessingConfig("do_not_use", "merge")
self.addOutputModule(mergeTask, "Merged",
primaryDataset=inputPrimaryDataset,
dataTier=self.dataTier,
filterName=None,
forceMerged=True)
workload.setLFNBase(self.mergedLFNBase, self.unmergedLFNBase)
workload.setDashboardActivity("StoreResults")
# setting the parameters which need to be set for all the tasks
# sets acquisitionEra, processingVersion, processingString
workload.setTaskPropertiesFromWorkload()
return workload
@staticmethod
def getWorkloadCreateArgs():
baseArgs = StdBase.getWorkloadCreateArgs()
specArgs = {"RequestType": {"default": "StoreResults", "optional": False},
"InputDataset": {"optional": False, "validate": dataset, "null": False},
"ConfigCacheID": {"optional": True, "null": True},
"DataTier": {"default": "USER", "type": str,
"optional": True, "validate": None,
"attr": "dataTier", "null": False},
"PhysicsGroup": {"default": "", "optional": False,
"null": False, "validate": physicsgroup},
"MergedLFNBase": {"default": "/store/results", "type": str,
"optional": True, "validate": None,
"attr": "mergedLFNBase", "null": False},
# site whitelist shouldn't be allowed, but let's make an exception for StoreResults
"SiteWhitelist": {"default": [], "type": makeNonEmptyList, "assign_optional": False,
"validate": lambda x: all([cmsname(y) for y in x])},
"BlockBlacklist": {"default": [], "type": makeList,
"optional": True, "validate": lambda x: all([block(y) for y in x]),
"attr": "blockBlacklist", "null": False},
"BlockWhitelist": {"default": [], "type": makeList,
"optional": True, "validate": lambda x: all([block(y) for y in x]),
"attr": "blockWhitelist", "null": False},
"RunBlacklist": {"default": [], "type": makeList,
"optional": True, "validate": lambda x: all([int(y) > 0 for y in x]),
"attr": "runBlacklist", "null": False},
"RunWhitelist": {"default": [], "type": makeList,
"optional": True, "validate": lambda x: all([int(y) > 0 for y in x]),
"attr": "runWhitelist", "null": False}}
baseArgs.update(specArgs)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/StdSpecs/StoreResults.py | 0.617167 | 0.277565 | StoreResults.py | pypi |
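# Hedged sketch of the request fields this factory insists on; the dataset,
# physics group and site names are placeholders chosen only for illustration.
#
#   arguments = {
#       "InputDataset": "/SomePrimary/SomeProcessed-v1/USER",
#       "PhysicsGroup": "ExampleGroup",      # lower-cased and appended to MergedLFNBase
#       "SiteWhitelist": ["T2_CH_CERN"],
#       # ... plus the generic StdBase creation arguments ...
#   }
#   workload = StoreResultsWorkloadFactory()("ExampleStoreResults", arguments)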
from __future__ import division
from Utils.Utilities import makeList
from WMCore.Lexicon import procstringT0
from WMCore.WMSpec.StdSpecs.StdBase import StdBase
class RepackWorkloadFactory(StdBase):
"""
_RepackWorkloadFactory_
Stamp out Repack workflows.
"""
def __init__(self):
StdBase.__init__(self)
self.inputPrimaryDataset = None
self.inputProcessedDataset = None
return
def buildWorkload(self):
"""
_buildWorkload_
Build the workload given all of the input parameters. At the very least
this will create a processing task and merge tasks for all the outputs
of the processing task.
        Note that there will be LogCollect tasks created for each processing
task and Cleanup tasks created for each merge task.
"""
workload = self.createWorkload()
workload.setDashboardActivity("t0")
cmsswStepType = "CMSSW"
taskType = "Processing"
# complete output configuration
for output in self.outputs:
output['moduleLabel'] = "write_%s_%s" % (output['primaryDataset'],
output['dataTier'])
# finalize splitting parameters
mySplitArgs = self.repackSplitArgs.copy()
mySplitArgs['algo_package'] = "T0.JobSplitting"
repackTask = workload.newTask("Repack")
repackOutMods = self.setupProcessingTask(repackTask, taskType,
scenarioName=self.procScenario,
scenarioFunc="repack",
scenarioArgs={'outputs': self.outputs},
splitAlgo="Repack",
splitArgs=mySplitArgs,
stepType=cmsswStepType)
repackTask.setTaskType("Repack")
self.addLogCollectTask(repackTask)
for repackOutLabel in repackOutMods:
self.addRepackMergeTask(repackTask, repackOutLabel)
# setting the parameters which need to be set for all the tasks
# sets acquisitionEra, processingVersion, processingString
workload.setTaskPropertiesFromWorkload()
# set the LFN bases (normally done by request manager)
# also pass run number to add run based directories
workload.setLFNBase(self.mergedLFNBase, self.unmergedLFNBase,
runNumber=self.runNumber)
return workload
def addRepackMergeTask(self, parentTask, parentOutputModuleName):
"""
_addRepackMergeTask_
Create an repackmerge task for files produced by the parent task.
"""
mergeTask = parentTask.addTask("%sMerge%s" % (parentTask.name(), parentOutputModuleName))
self.addRuntimeMonitors(mergeTask)
mergeTaskCmssw = mergeTask.makeStep("cmsRun1")
mergeTaskCmssw.setStepType("CMSSW")
mergeTaskStageOut = mergeTaskCmssw.addStep("stageOut1")
mergeTaskStageOut.setStepType("StageOut")
mergeTaskLogArch = mergeTaskCmssw.addStep("logArch1")
mergeTaskLogArch.setStepType("LogArchive")
mergeTask.setTaskLogBaseLFN(self.unmergedLFNBase)
self.addLogCollectTask(mergeTask, taskName="%s%sMergeLogCollect" % (parentTask.name(), parentOutputModuleName))
mergeTask.applyTemplates()
parentTaskCmssw = parentTask.getStep("cmsRun1")
parentOutputModule = parentTaskCmssw.getOutputModule(parentOutputModuleName)
dataTier = getattr(parentOutputModule, "dataTier")
mergeTask.setInputReference(parentTaskCmssw, outputModule=parentOutputModuleName, dataTier=dataTier)
mergeTaskCmsswHelper = mergeTaskCmssw.getTypeHelper()
mergeTaskCmsswHelper.cmsswSetup(self.frameworkVersion, softwareEnvironment="",
scramArch=self.scramArch)
mergeTaskCmsswHelper.setErrorDestinationStep(stepName=mergeTaskLogArch.name())
mergeTaskCmsswHelper.setGlobalTag(self.globalTag)
mergeTaskCmsswHelper.setOverrideCatalog(self.overrideCatalog)
# mergeTaskStageHelper = mergeTaskStageOut.getTypeHelper()
# mergeTaskStageHelper.setMinMergeSize(0, 0)
mergeTask.setTaskType("Merge")
# finalize splitting parameters
mySplitArgs = self.repackMergeSplitArgs.copy()
mySplitArgs['algo_package'] = "T0.JobSplitting"
mergeTask.setSplittingAlgorithm("RepackMerge",
**mySplitArgs)
mergeTaskCmsswHelper.setDataProcessingConfig(self.procScenario, "merge")
self.addOutputModule(mergeTask, "Merged",
primaryDataset=getattr(parentOutputModule, "primaryDataset"),
dataTier=getattr(parentOutputModule, "dataTier"),
filterName=getattr(parentOutputModule, "filterName"),
forceMerged=True)
self.addOutputModule(mergeTask, "MergedError",
primaryDataset=getattr(parentOutputModule, "primaryDataset") + "-Error",
dataTier=getattr(parentOutputModule, "dataTier"),
filterName=getattr(parentOutputModule, "filterName"),
forceMerged=True)
self.addCleanupTask(parentTask, parentOutputModuleName, dataTier=getattr(parentOutputModule, "dataTier"))
return mergeTask
def __call__(self, workloadName, arguments):
"""
_call_
Create a Repack workload with the given parameters.
"""
StdBase.__call__(self, workloadName, arguments)
# Required parameters that must be specified by the Requestor.
self.outputs = arguments['Outputs']
# job splitting parameters
self.repackSplitArgs = {}
self.repackSplitArgs['maxSizeSingleLumi'] = arguments['MaxSizeSingleLumi']
self.repackSplitArgs['maxSizeMultiLumi'] = arguments['MaxSizeMultiLumi']
self.repackSplitArgs['maxInputEvents'] = arguments['MaxInputEvents']
self.repackSplitArgs['maxInputFiles'] = arguments['MaxInputFiles']
self.repackSplitArgs['maxLatency'] = arguments['MaxLatency']
self.repackMergeSplitArgs = {}
self.repackMergeSplitArgs['minInputSize'] = arguments['MinInputSize']
self.repackMergeSplitArgs['maxInputSize'] = arguments['MaxInputSize']
self.repackMergeSplitArgs['maxEdmSize'] = arguments['MaxEdmSize']
self.repackMergeSplitArgs['maxOverSize'] = arguments['MaxOverSize']
self.repackMergeSplitArgs['maxInputEvents'] = arguments['MaxInputEvents']
self.repackMergeSplitArgs['maxInputFiles'] = arguments['MaxInputFiles']
self.repackMergeSplitArgs['maxLatency'] = arguments['MaxLatency']
return self.buildWorkload()
@staticmethod
def getWorkloadCreateArgs():
baseArgs = StdBase.getWorkloadCreateArgs()
specArgs = {"RequestType": {"default": "Repack"},
"ConfigCacheID": {"optional": True, "null": True},
"Scenario": {"default": "fake", "attr": "procScenario"},
"GlobalTag": {"default": "fake"},
"ProcessingString": {"default": "", "validate": procstringT0},
"Outputs": {"type": makeList, "optional": False},
"MaxSizeSingleLumi": {"type": int, "optional": False},
"MaxSizeMultiLumi": {"type": int, "optional": False},
"MaxInputEvents": {"type": int, "optional": False},
"MaxInputFiles": {"type": int, "optional": False},
"MaxLatency": {"type": int, "optional": False},
"MinInputSize": {"type": int, "optional": False},
"MaxInputSize": {"type": int, "optional": False},
"MaxEdmSize": {"type": int, "optional": False},
"MaxOverSize": {"type": int, "optional": False},
}
baseArgs.update(specArgs)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs
@staticmethod
def getWorkloadAssignArgs():
baseArgs = StdBase.getWorkloadAssignArgs()
specArgs = {
"Override": {"default": {"eos-lfn-prefix": "root://eoscms.cern.ch//eos/cms/store/logs/prod/recent/Repack"},
"type": dict},
}
baseArgs.update(specArgs)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/StdSpecs/Repack.py | 0.739234 | 0.185264 | Repack.py | pypi |
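# Sketch of the splitting-related arguments consumed by Repack __call__ above;
# every value is a placeholder, real limits come from the Tier-0 configuration.
#
#   arguments.update({
#       "Outputs": [{"primaryDataset": "ExamplePD", "dataTier": "RAW"}],
#       "MaxSizeSingleLumi": 12 * 1024 ** 3,
#       "MaxSizeMultiLumi": 8 * 1024 ** 3,
#       "MaxInputEvents": 3000000,
#       "MaxInputFiles": 1000,
#       "MaxLatency": 24 * 3600,
#       "MinInputSize": 2 * 1024 ** 3,
#       "MaxInputSize": 4 * 1024 ** 3,
#       "MaxEdmSize": 10 * 1024 ** 3,
#       "MaxOverSize": 8 * 1024 ** 3,
#   })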
from __future__ import division
from future.utils import viewitems
from builtins import range
from Utils.Utilities import strToBool
import WMCore.WMSpec.Steps.StepFactory as StepFactory
from WMCore.Lexicon import primdataset, taskStepName
from WMCore.WMSpec.StdSpecs.StdBase import StdBase
from WMCore.WMSpec.WMWorkloadTools import (validateArgumentsCreate, parsePileupConfig,
checkMemCore, checkEventStreams, checkTimePerEvent)
from WMCore.WMSpec.WMSpecErrors import WMSpecFactoryException
class StepChainWorkloadFactory(StdBase):
"""
__StepChainWorkloadFactory__
Factory for StepChain workflows.
"""
def __init__(self):
"""
__init__
Setup parameters that will be later overwritten in the call,
otherwise pylint will complain about them.
"""
StdBase.__init__(self)
self.configCacheUrl = None
self.globalTag = None
self.frameworkVersion = None
self.scramArch = None
self.couchDBName = None
self.stepChain = None
self.sizePerEvent = None
self.timePerEvent = None
self.primaryDataset = None
self.prepID = None
self.eventsPerJob = None
self.eventsPerLumi = None
# stepMapping is going to be used during assignment for properly mapping
# the arguments to each step/cmsRun
self.stepMapping = {}
self.stepParentageMapping = {}
def __call__(self, workloadName, arguments):
"""
__call__
Create a StepChain workload with the given parameters.
Configures the workload based on the first task information,
then properly sets up the remaining tasks.
"""
StdBase.__call__(self, workloadName, arguments)
self.workload = self.createWorkload()
# Update the task configuration
taskConf = {}
for k, v in viewitems(arguments["Step1"]):
taskConf[k] = v
self.modifyTaskConfiguration(taskConf, True, 'InputDataset' not in taskConf)
self.inputPrimaryDataset = self.getStepValue('PrimaryDataset', taskConf, self.primaryDataset)
self.blockBlacklist = taskConf["BlockBlacklist"]
self.blockWhitelist = taskConf["BlockWhitelist"]
self.runBlacklist = taskConf["RunBlacklist"]
self.runWhitelist = taskConf["RunWhitelist"]
self.splittingAlgo = taskConf['SplittingAlgo']
# Create the first task
firstTask = self.workload.newTask(taskConf['StepName'])
# it has to be called before the other steps are created
self.createStepMappings(arguments)
# Create a proper task and set workload level arguments
startPolicy = self.decideWorkQueueStartPolicy(arguments)
self.workload.setWorkQueueSplitPolicy(startPolicy, taskConf['SplittingAlgo'],
taskConf['SplittingArguments'],
OpenRunningTimeout=self.openRunningTimeout)
if startPolicy == "MonteCarlo":
self.workload.setDashboardActivity("production")
self.workload.setEndPolicy("SingleShot")
self.setupGeneratorTask(firstTask, taskConf)
else:
self.workload.setDashboardActivity("processing")
self.setupTask(firstTask, taskConf)
# Now modify this task to add the next steps
if self.stepChain > 1:
self.setupNextSteps(firstTask, arguments)
self.createStepParentageMappings(firstTask, arguments)
self.workload.setStepMapping(self.stepMapping)
self.workload.setStepParentageMapping(self.stepParentageMapping)
# and push the parentage map to the reqmgr2 workload cache doc
arguments['ChainParentageMap'] = self.workload.getChainParentageSimpleMapping()
# Feed values back to save in couch
if self.eventsPerJob:
arguments['Step1']['EventsPerJob'] = self.eventsPerJob
if self.eventsPerLumi:
arguments['Step1']['EventsPerLumi'] = self.eventsPerLumi
return self.workload
def createStepMappings(self, origArgs):
"""
_createStepMappings_
Create a simple map of StepName to Step and cmsRun number.
cmsRun numbers are sequential, just like the step number.
:param origArgs: arguments provided by the user + default spec args
:return: update a dictionary in place which is latter used to set a
`stepMapping` property in the workload object
"""
for i in range(1, self.stepChain + 1):
stepNumber = "Step%d" % i
stepName = origArgs[stepNumber]['StepName']
cmsRunNumber = "cmsRun%d" % i
self.stepMapping.setdefault(stepName, (stepNumber, cmsRunNumber))
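        # For a hypothetical two-step request the mapping built here ends up as
        #   {"GENSIM": ("Step1", "cmsRun1"), "DIGI": ("Step2", "cmsRun2")}
        # where the keys are the user-supplied StepName values.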
def createStepParentageMappings(self, firstTaskO, origArgs):
"""
_createStepParentageMappings_
Create a dict struct with a mapping of step name to parent step. It
also includes a map of output datasets and parent dataset.
:param firstTaskO: a WMTask object with the top level StepChain task
:param origArgs: arguments provided by the user + default spec args
:return: update a dictionary in place which will be later set as a
WMWorkload property
"""
for i in range(1, self.stepChain + 1):
stepNumber = "Step%d" % i
stepName = origArgs[stepNumber]['StepName']
cmsRunNumber = "cmsRun%d" % i
self.stepParentageMapping.setdefault(stepName, {})
self.stepParentageMapping[stepName] = {'StepNumber': stepNumber,
'StepCmsRun': cmsRunNumber,
'ParentStepName': None,
'ParentStepNumber': None,
'ParentStepCmsRun': None,
'ParentDataset': None,
'OutputDatasetMap': {}}
if stepNumber == 'Step1':
self.stepParentageMapping[stepName]['ParentDataset'] = origArgs[stepNumber].get('InputDataset')
# set the OutputDatasetMap or empty if KeepOutput is False
if origArgs[stepNumber].get("KeepOutput", True):
stepHelper = firstTaskO.getStepHelper(cmsRunNumber)
for outputModuleName in stepHelper.listOutputModules():
outputModule = stepHelper.getOutputModule(outputModuleName)
outputDataset = "/%s/%s/%s" % (outputModule.primaryDataset,
outputModule.processedDataset,
outputModule.dataTier)
self.stepParentageMapping[stepName]['OutputDatasetMap'][outputModuleName] = outputDataset
if "InputStep" in origArgs[stepNumber]:
parentStepName = origArgs[stepNumber]["InputStep"]
self.stepParentageMapping[stepName]['ParentStepName'] = parentStepName
parentStepNumber = self.stepParentageMapping[parentStepName]['StepNumber']
self.stepParentageMapping[stepName]['ParentStepNumber'] = parentStepNumber
parentStepCmsRun = self.stepParentageMapping[parentStepName]['StepCmsRun']
self.stepParentageMapping[stepName]['ParentStepCmsRun'] = parentStepCmsRun
parentOutputModName = origArgs[stepNumber]["InputFromOutputModule"]
parentDset = self.findParentStepWithOutputDataset(origArgs, parentStepNumber, parentStepName, parentOutputModName)
self.stepParentageMapping[stepName]['ParentDataset'] = parentDset
def findParentStepWithOutputDataset(self, origArgs, stepNumber, stepName, outModName):
"""
_findParentStepWithOutputDataset_
Given the parent step name and output module name, finds the parent dataset
:param origArgs: request arguments
:param stepNumber: step number of the parent step
:param stepName: step name of the parent step
:param outModName: output module name of the parent step
:return: the parent dataset name (str), otherwise None
"""
if origArgs[stepNumber].get("KeepOutput", True):
return self.stepParentageMapping[stepName]['OutputDatasetMap'][outModName]
else:
# then fetch grand-parent data
parentStepNumber = self.stepParentageMapping[stepName]['ParentStepNumber']
parentStepName = self.stepParentageMapping[stepName]['ParentStepName']
if parentStepNumber:
parentOutputModName = origArgs[stepNumber]["InputFromOutputModule"]
return self.findParentStepWithOutputDataset(origArgs, parentStepNumber, parentStepName, parentOutputModName)
else:
# this is Step1, return the InputDataset if any
return origArgs[stepNumber].get("InputDataset")
def setupGeneratorTask(self, task, taskConf):
"""
_setupGeneratorTask_
Set up an initial generator task for the top level step (cmsRun1)
"""
configCacheID = taskConf['ConfigCacheID']
splitAlgorithm = taskConf["SplittingAlgo"]
splitArguments = taskConf["SplittingArguments"]
outMods = self.setupProcessingTask(task, "Production", couchDBName=self.couchDBName,
configDoc=configCacheID, configCacheUrl=self.configCacheUrl,
splitAlgo=splitAlgorithm, splitArgs=splitArguments,
seeding=taskConf['Seeding'],
totalEvents=taskConf['RequestNumEvents'],
cmsswVersion=taskConf.get("CMSSWVersion", None),
scramArch=taskConf.get("ScramArch", None),
globalTag=taskConf.get("GlobalTag", None),
taskConf=taskConf)
if taskConf["PileupConfig"]:
self.setupPileup(task, taskConf['PileupConfig'], stepName="cmsRun1")
# outputModules were added already, we just want to create merge tasks here
if strToBool(taskConf.get('KeepOutput', True)):
self.setupMergeTask(task, taskConf, "cmsRun1", outMods)
return
def setupTask(self, task, taskConf):
"""
_setupTask_
Build the task using the setupProcessingTask from StdBase
and set the parents appropriately to handle a processing task,
It's only called for the top level task and top level step (cmsRun1)
"""
configCacheID = taskConf["ConfigCacheID"]
splitAlgorithm = taskConf["SplittingAlgo"]
splitArguments = taskConf["SplittingArguments"]
self.inputDataset = taskConf["InputDataset"]
# Use PD from the inputDataset if not provided in the task itself
if not self.inputPrimaryDataset:
self.inputPrimaryDataset = self.inputDataset[1:].split("/")[0]
outMods = self.setupProcessingTask(task, "Processing",
inputDataset=self.inputDataset, couchDBName=self.couchDBName,
configDoc=configCacheID, configCacheUrl=self.configCacheUrl,
splitAlgo=splitAlgorithm, splitArgs=splitArguments,
cmsswVersion=taskConf.get("CMSSWVersion", None),
scramArch=taskConf.get("ScramArch", None),
globalTag=taskConf.get("GlobalTag", None),
taskConf=taskConf)
lumiMask = taskConf.get("LumiList", self.workload.getLumiList())
if lumiMask:
task.setLumiMask(lumiMask)
if taskConf["PileupConfig"]:
self.setupPileup(task, taskConf['PileupConfig'], stepName="cmsRun1")
# outputModules were added already, we just want to create merge tasks here
if strToBool(taskConf.get('KeepOutput', True)):
self.setupMergeTask(task, taskConf, "cmsRun1", outMods)
return
def setupNextSteps(self, task, origArgs):
"""
_setupNextSteps_
Modify the step one task to include N more CMSSW steps and
chain the output between all three steps.
"""
self.stepParentageMapping.setdefault(origArgs['Step1']['StepName'], {})
for i in range(2, self.stepChain + 1):
currentStepNumber = "Step%d" % i
currentCmsRun = "cmsRun%d" % i
taskConf = {}
for k, v in viewitems(origArgs[currentStepNumber]):
taskConf[k] = v
parentStepNumber = self.stepMapping.get(taskConf['InputStep'])[0]
parentCmsRun = self.stepMapping.get(taskConf['InputStep'])[1]
parentCmsswStep = task.getStep(parentCmsRun)
parentCmsswStepHelper = parentCmsswStep.getTypeHelper()
# Set default values for the task parameters
self.modifyTaskConfiguration(taskConf, False, 'InputDataset' not in taskConf)
globalTag = self.getStepValue('GlobalTag', taskConf, self.globalTag)
frameworkVersion = self.getStepValue('CMSSWVersion', taskConf, self.frameworkVersion)
scramArch = self.getStepValue('ScramArch', taskConf, self.scramArch)
prepId = self.getStepValue('PrepID', taskConf, self.prepID)
currentCmssw = parentCmsswStep.addTopStep(currentCmsRun)
currentCmssw.setStepType("CMSSW")
template = StepFactory.getStepTemplate("CMSSW")
template(currentCmssw.data)
currentCmsswStepHelper = currentCmssw.getTypeHelper()
currentCmsswStepHelper.setPrepId(prepId)
currentCmsswStepHelper.setGlobalTag(globalTag)
currentCmsswStepHelper.setupChainedProcessing(parentCmsRun, taskConf['InputFromOutputModule'])
currentCmsswStepHelper.cmsswSetup(frameworkVersion, softwareEnvironment="", scramArch=scramArch)
currentCmsswStepHelper.setConfigCache(self.configCacheUrl, taskConf['ConfigCacheID'], self.couchDBName)
# multicore settings
multicore = self.multicore
eventStreams = self.eventStreams
if taskConf['Multicore'] > 0:
multicore = taskConf['Multicore']
if taskConf.get("EventStreams") is not None and taskConf['EventStreams'] >= 0:
eventStreams = taskConf['EventStreams']
currentCmsswStepHelper.setNumberOfCores(multicore, eventStreams)
# Pileup check
taskConf["PileupConfig"] = parsePileupConfig(taskConf["MCPileup"], taskConf["DataPileup"])
if taskConf["PileupConfig"]:
self.setupPileup(task, taskConf['PileupConfig'], stepName=currentCmsRun)
# Handling the output modules in order to decide whether we should
# stage them out and report them in the Report.pkl file
parentKeepOutput = strToBool(origArgs[parentStepNumber].get('KeepOutput', True))
parentCmsswStepHelper.keepOutput(parentKeepOutput)
childKeepOutput = strToBool(taskConf.get('KeepOutput', True))
currentCmsswStepHelper.keepOutput(childKeepOutput)
self.setupOutputModules(task, taskConf, currentCmsRun, childKeepOutput)
# Closing out the task configuration. The last step output must be saved/merged
currentCmsswStepHelper.keepOutput(True)
return
def getStepValue(self, keyName, stepDict, topLevelValue):
"""
        Utility method to reliably get the value of a step-level key, falling
        back to the top-level value when the step does not define it.
"""
if keyName in stepDict and stepDict.get(keyName) is not None:
return stepDict.get(keyName)
else:
return topLevelValue
def setupOutputModules(self, task, taskConf, stepCmsRun, keepOutput):
"""
_setupOutputModules_
Retrieves the outputModules from the step configuration and sets up
a merge task for them. Only when KeepOutput is set to True.
"""
taskConf = taskConf or {}
outputMods = {}
configOutput = self.determineOutputModules(configDoc=taskConf["ConfigCacheID"],
configCacheUrl=self.configCacheUrl,
couchDBName=self.couchDBName)
for outputModuleName in configOutput:
outputModule = self.addOutputModule(task, outputModuleName,
self.inputPrimaryDataset,
configOutput[outputModuleName]["dataTier"],
configOutput[outputModuleName]["filterName"],
stepName=stepCmsRun, taskConf=taskConf)
outputMods[outputModuleName] = outputModule
if keepOutput:
self.setupMergeTask(task, taskConf, stepCmsRun, outputMods)
return
def setupMergeTask(self, task, taskConf, stepCmsRun, outputMods):
"""
_setupMergeTask_
Adds a merge task to the parent with the proper task configuration.
"""
frameworkVersion = taskConf.get("CMSSWVersion", self.frameworkVersion)
scramArch = taskConf.get("ScramArch", self.scramArch)
# PrepID has to be inherited from the workload level, not from task
if not taskConf.get('PrepID'):
taskConf['PrepID'] = self.prepID
for outputModuleName in outputMods:
dummyTask = self.addMergeTask(task, self.splittingAlgo, outputModuleName, stepCmsRun,
cmsswVersion=frameworkVersion, scramArch=scramArch,
forceTaskName=taskConf.get('StepName'), taskConf=taskConf)
return
def modifyTaskConfiguration(self, taskConf, firstTask=False, generator=False):
"""
_modifyTaskConfiguration_
Modify the TaskConfiguration according to the specifications
in getWorkloadCreateArgs and getChainCreateArgs. It does type
casting and assigns default values.
"""
baseArguments = self.getWorkloadCreateArgs()
for argument in baseArguments:
if argument in taskConf:
taskConf[argument] = baseArguments[argument]["type"](taskConf[argument])
taskArguments = self.getChainCreateArgs(firstTask, generator)
for argument in taskArguments:
if argument not in taskConf:
taskConf[argument] = taskArguments[argument]["default"]
else:
taskConf[argument] = taskArguments[argument]["type"](taskConf[argument])
taskConf["PileupConfig"] = parsePileupConfig(taskConf["MCPileup"], taskConf["DataPileup"])
if firstTask:
self.modifyJobSplitting(taskConf, generator)
return
def modifyJobSplitting(self, taskConf, generator):
"""
_modifyJobSplitting_
Adapt job splitting according to the first step configuration
or lack of some of them.
"""
if generator:
requestNumEvts = int(taskConf.get("RequestNumEvents", 0))
filterEff = taskConf.get("FilterEfficiency")
# Adjust totalEvents according to the filter efficiency
taskConf["SplittingAlgo"] = "EventBased"
taskConf["RequestNumEvents"] = int(requestNumEvts / filterEff)
taskConf["SizePerEvent"] = self.sizePerEvent * filterEff
taskConf["SplittingArguments"] = {}
if taskConf["SplittingAlgo"] in ["EventBased", "EventAwareLumiBased"]:
taskConf["EventsPerJob"], taskConf["EventsPerLumi"] = StdBase.calcEvtsPerJobLumi(taskConf.get("EventsPerJob"),
taskConf.get("EventsPerLumi"),
self.timePerEvent,
taskConf.get("RequestNumEvents"))
self.eventsPerJob = taskConf["EventsPerJob"]
self.eventsPerLumi = taskConf["EventsPerLumi"]
taskConf["SplittingArguments"]["events_per_job"] = taskConf["EventsPerJob"]
if taskConf["SplittingAlgo"] == "EventBased":
taskConf["SplittingArguments"]["events_per_lumi"] = taskConf["EventsPerLumi"]
else:
taskConf["SplittingArguments"]["job_time_limit"] = 48 * 3600 # 2 days
taskConf["SplittingArguments"]["lheInputFiles"] = taskConf["LheInputFiles"]
elif taskConf["SplittingAlgo"] == "LumiBased":
taskConf["SplittingArguments"]["lumis_per_job"] = taskConf["LumisPerJob"]
elif taskConf["SplittingAlgo"] == "FileBased":
taskConf["SplittingArguments"]["files_per_job"] = taskConf["FilesPerJob"]
taskConf["SplittingArguments"].setdefault("include_parents", taskConf['IncludeParents'])
taskConf["SplittingArguments"].setdefault("deterministicPileup", self.deterministicPileup)
return
@staticmethod
def getWorkloadCreateArgs():
baseArgs = StdBase.getWorkloadCreateArgs()
specArgs = {"RequestType": {"default": "StepChain", "optional": False},
"Step1": {"default": {}, "optional": False, "type": dict},
# ConfigCacheID is not used in the main dict for StepChain
"ConfigCacheID": {"optional": True, "null": True},
"DeterministicPileup": {"default": False, "type": strToBool, "optional": True, "null": False},
"PrimaryDataset": {"null": True, "validate": primdataset},
"StepChain": {"default": 1, "type": int, "null": False,
"optional": False, "validate": lambda x: x > 0},
"ChainParentageMap": {"default": {}, "type": dict},
"FirstEvent": {"default": 1, "type": int, "validate": lambda x: x > 0,
"null": False},
"FirstLumi": {"default": 1, "type": int, "validate": lambda x: x > 0,
"null": False},
"ParentageResolved": {"default": False, "type": strToBool, "null": False},
### Override StdBase parameter definition
"TimePerEvent": {"default": 12.0, "type": float, "validate": lambda x: x > 0},
"Memory": {"default": 2300.0, "type": float, "validate": lambda x: x > 0},
"Multicore": {"default": 1, "type": int, "validate": checkMemCore},
"EventStreams": {"type": int, "null": True, "default": 0, "validate": checkEventStreams}
}
baseArgs.update(specArgs)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs
@staticmethod
def getChainCreateArgs(firstTask=False, generator=False):
"""
_getChainCreateArgs_
This represents the authoritative list of request arguments that are
allowed in each chain (Step/Task) of a chained request, during request creation.
Additional spec-specific arguments must be defined inside each spec class.
For more information on how these arguments are built, please have a look
at the docstring for getWorkloadCreateArgs.
"""
baseArgs = StdBase.getChainCreateArgs(firstTask, generator)
arguments = {
'InputStep': {'default': None, 'null': False, 'optional': firstTask},
'StepName': {'null': False, 'optional': False, 'validate': taskStepName},
'PrimaryDataset': {'default': None, 'optional': True,
'validate': primdataset, 'null': False}
}
baseArgs.update(arguments)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs
@staticmethod
def getWorkloadAssignArgs():
baseArgs = StdBase.getWorkloadAssignArgs()
specArgs = {
"ChainParentageMap": {"default": {}, "type": dict},
### Override StdBase assignment parameter definition
"Memory": {"type": float, "validate": checkMemCore},
"Multicore": {"type": int, "validate": checkMemCore},
"EventStreams": {"type": int, "validate": checkEventStreams},
}
baseArgs.update(specArgs)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs
def validateSchema(self, schema):
"""
_validateSchema_
Settings that are not supported and will cause workflow injection to fail, are:
* workflow with more than 10 steps
* output from the last step *must* be saved
* each step configuration must be a dictionary
* StepChain argument must reflect the number of Steps in the request
* trident configuration, where 2 steps have the same output module AND datatier
* usual ConfigCacheID validation
* and the usual Step arguments validation, as defined in the spec
"""
numSteps = schema['StepChain']
if numSteps > 10:
msg = "Workflow exceeds the maximum allowed number of steps. "
msg += "Limited to up to 10 steps, found %s steps." % numSteps
self.raiseValidationException(msg)
lastStep = "Step%s" % schema['StepChain']
if not strToBool(schema[lastStep].get('KeepOutput', True)):
msg = "Dropping the output (KeepOutput=False) of the last step is prohibited.\n"
msg += "You probably want to remove that step completely and try again."
self.raiseValidationException(msg=msg)
outputModTier = []
for i in range(1, numSteps + 1):
stepNumber = "Step%s" % i
if stepNumber not in schema:
msg = "Step '%s' not present in the request schema." % stepNumber
self.raiseValidationException(msg=msg)
step = schema[stepNumber]
# We can't handle non-dictionary steps
if not isinstance(step, dict):
msg = "Step '%s' not defined as a dictionary. " % stepNumber
msg += "It could be an indicator of JSON error.\n"
self.raiseValidationException(msg=msg)
# Generic step parameter validation
self.validateStep(step, self.getChainCreateArgs(i == 1, i == 1 and 'InputDataset' not in step))
# Validate the existence of the configCache
if step["ConfigCacheID"]:
self.validateConfigCacheExists(configID=step['ConfigCacheID'],
configCacheUrl=schema['ConfigCacheUrl'],
couchDBName=schema["CouchDBName"],
getOutputModules=False)
# we cannot save output of two steps using the same output module and datatier(s)
if strToBool(step.get("KeepOutput", True)):
configOutput = self.determineOutputModules(configDoc=step["ConfigCacheID"],
configCacheUrl=schema['ConfigCacheUrl'],
couchDBName=schema["CouchDBName"])
for modName, values in viewitems(configOutput):
thisOutput = (modName, values['dataTier'])
if thisOutput in outputModTier:
msg = "StepChain cannot save output of different steps using "
msg += "the same output module AND datatier(s)."
msg += "\n%s re-using outputModule: %s and datatier: %s" % (stepNumber,
modName,
values['dataTier'])
self.raiseValidationException(msg=msg)
outputModTier.append(thisOutput)
return
def validateStep(self, taskConf, taskArgumentDefinition):
"""
_validateStep_
Validate the step information against the given
argument description
"""
try:
validateArgumentsCreate(taskConf, taskArgumentDefinition, checkInputDset=False)
except WMSpecFactoryException:
# just re-raise it to keep the error message clear
raise
except Exception as ex:
self.raiseValidationException(str(ex))
return
def decideWorkQueueStartPolicy(self, reqDict):
"""
Given a request dictionary, decides which WorkQueue start
policy needs to be used in a given request.
:param reqDict: a dictionary with the creation request information
:return: a string with the start policy to be used.
"""
if not reqDict["Step1"].get("InputDataset"):
return "MonteCarlo"
inputDset = reqDict["Step1"]["InputDataset"]
if inputDset.endswith("/MINIAODSIM"):
return "Dataset"
else:
return "Block" | /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/StdSpecs/StepChain.py | 0.7237 | 0.185394 | StepChain.py | pypi |
from __future__ import division
from Utils.Utilities import makeList
from WMCore.Lexicon import dataset, block, primdataset
from WMCore.WMSpec.StdSpecs.StdBase import StdBase
class DataProcessing(StdBase):
"""
_DataProcessing_
Base class for specs with input; it just defines some of the shared attributes
for this kind of WMSpec.
"""
def __call__(self, workloadName, arguments):
StdBase.__call__(self, workloadName, arguments)
# Handle the default of the various splitting algorithms
self.procJobSplitArgs = {"include_parents": self.includeParents}
if self.procJobSplitAlgo in ["EventBased", "EventAwareLumiBased"]:
if self.eventsPerJob is None:
self.eventsPerJob = int((8.0 * 3600.0) / self.timePerEvent)
if self.procJobSplitAlgo == "EventAwareLumiBased":
self.procJobSplitArgs["job_time_limit"] = 48 * 3600 # 2 days
self.procJobSplitArgs["events_per_job"] = self.eventsPerJob
arguments['EventsPerJob'] = self.eventsPerJob
elif self.procJobSplitAlgo == "LumiBased":
self.procJobSplitArgs["lumis_per_job"] = self.lumisPerJob
elif self.procJobSplitAlgo == "FileBased":
self.procJobSplitArgs["files_per_job"] = self.filesPerJob
return
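# Worked example of the EventsPerJob default computed above (numbers are
# illustrative only): with TimePerEvent = 12 s the default becomes
# int(8.0 * 3600.0 / 12) = 2400, i.e. jobs are sized to roughly eight hours
# of processing time.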
@staticmethod
def getWorkloadCreateArgs():
baseArgs = StdBase.getWorkloadCreateArgs()
specArgs = {"InputDataset": {"optional": False, "validate": dataset, "null": False},
"Scenario": {"optional": True, "null": True, "attr": "procScenario"},
"PrimaryDataset": {"optional": True, "validate": primdataset,
"attr": "inputPrimaryDataset", "null": True},
"RunBlacklist": {"default": [], "type": makeList, "null": False,
"validate": lambda x: all([int(y) > 0 for y in x])},
"RunWhitelist": {"default": [], "type": makeList, "null": False,
"validate": lambda x: all([int(y) > 0 for y in x])},
"BlockBlacklist": {"default": [], "type": makeList,
"validate": lambda x: all([block(y) for y in x])},
"BlockWhitelist": {"default": [], "type": makeList,
"validate": lambda x: all([block(y) for y in x])},
"SplittingAlgo": {"default": "EventAwareLumiBased", "null": False,
"validate": lambda x: x in ["EventBased", "LumiBased",
"EventAwareLumiBased", "FileBased"],
"attr": "procJobSplitAlgo"},
"EventsPerJob": {"type": int, "validate": lambda x: x > 0, "null": True},
"LumisPerJob": {"default": 8, "type": int, "null": False,
"validate": lambda x: x > 0},
"FilesPerJob": {"default": 1, "type": int, "null": False,
"validate": lambda x: x > 0}
}
baseArgs.update(specArgs)
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs
@staticmethod
def getWorkloadAssignArgs():
baseArgs = StdBase.getWorkloadAssignArgs()
StdBase.setDefaultArgumentsProperty(baseArgs)
return baseArgs
| /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/StdSpecs/DataProcessing.py | 0.726037 | 0.30399 | DataProcessing.py | pypi |
from Utils.Utilities import makeList
from WMCore.Lexicon import couchurl, identifier, cmsname, dataset
from WMCore.WMSpec.StdSpecs.StdBase import StdBase
from WMCore.WMSpec.WMWorkload import WMWorkloadHelper
from WMCore.WMSpec.WMWorkloadTools import (loadSpecClassByType, validateArgumentsCreate,
checkMemCore, checkEventStreams, checkTimePerEvent)
class ResubmissionWorkloadFactory(StdBase):
"""
_ResubmissionWorkloadFactory_
Build Resubmission workloads.
"""
def buildWorkload(self, arguments):
"""
_buildWorkload_
Build a resubmission workload from a previous
workload, it loads the workload and truncates it.
"""
helper = WMWorkloadHelper()
# where to find the original spec file
originalRequestURL = "%s/%s" % (arguments['CouchURL'], arguments['CouchWorkloadDBName'])
helper.loadSpecFromCouch(originalRequestURL, self.originalRequestName)
helper.truncate(self.workloadName, self.initialTaskPath,
self.acdcServer, self.acdcDatabase,
self.collectionName)
helper.ignoreOutputModules(self.ignoredOutputModules)
# override a couple of parameters, if provided by user
# Note that if it was provided by the user, then it's already part of the arguments too
if "RequestPriority" in self.userArgs:
helper.setPriority(arguments["RequestPriority"])
if "Memory" in self.userArgs:
helper.setMemory(arguments["Memory"])
if "Multicore" in self.userArgs or "EventStreams" in self.userArgs:
self.setCoresAndStreams(helper, arguments)
if "TimePerEvent" in self.userArgs:
helper.setTimePerEvent(arguments.get("TimePerEvent"))
return helper
def __call__(self, workloadName, arguments):
StdBase.__call__(self, workloadName, arguments)
self.originalRequestName = self.initialTaskPath.split('/')[1]
return self.buildWorkload(arguments)
def factoryWorkloadConstruction(self, workloadName, arguments, userArgs=None):
"""
Resubmission factory override of the master StdBase factory.
Builds the entire workload, with specific features to Resubmission
requests, and also performs a subset of the standard validation.
:param workloadName: string with the name of the workload
:param arguments: dictionary with all the relevant create/assign parameters
:param userArgs: dictionary with user specific parameters
:return: the workload object
"""
self.userArgs = userArgs or {}
self.fixupArguments(arguments)
self.validateSchema(schema=arguments)
workload = self.__call__(workloadName, arguments)
self.validateWorkload(workload)
return workload
@staticmethod
def getWorkloadCreateArgs():
specArgs = {"RequestType": {"default": "Resubmission"},
"ResubmissionCount": {"default": 1, "type": int},
"OriginalRequestType": {"null": False},
"OriginalRequestName": {"null": False},
"InitialTaskPath": {"optional": False,
"validate": lambda x: len(x.split('/')) > 2},
"ACDCServer": {"default": "https://cmsweb.cern.ch/couchdb", "validate": couchurl,
"attr": "acdcServer"},
"ACDCDatabase": {"default": "acdcserver", "validate": identifier,
"attr": "acdcDatabase"},
"CollectionName": {"default": None, "null": True},
"IgnoredOutputModules": {"default": [], "type": makeList},
"SiteWhitelist": {"default": [], "type": makeList,
"validate": lambda x: all([cmsname(y) for y in x])},
# it can be Chained or MC requests, so lets make it optional
"InputDataset": {"optional": True, "validate": dataset, "null": True},
### Override StdBase parameter definition
"TimePerEvent": {"default": None, "type": float, "null": True, "validate": checkTimePerEvent},
"Memory": {"default": None, "type": float, "null": True, "validate": checkMemCore},
"Multicore": {"default": None, "type": int, "null": True, "validate": checkMemCore},
"EventStreams": {"default": None, "type": int, "null": True, "validate": checkEventStreams}
}
StdBase.setDefaultArgumentsProperty(specArgs)
return specArgs
def fixupArguments(self, arguments):
"""
This method will ensure that:
* if the user provided some specific arguments, it will be passed down the chain
* otherwise, the same argument from the original/parent workflow will be dumped
The only arguments to be tweaked like that are:
TimePerEvent, Memory, Multicore, EventStreams
:param arguments: full set of arguments from creation+assignment definitions
:return: nothing, updates are made in place
"""
if arguments["OriginalRequestType"] == "ReReco":
# top level arguments are already correct
return
specialArgs = ("TimePerEvent", "Memory", "Multicore", "EventStreams")
argsDefinition = self.getWorkloadCreateArgs()
for arg in specialArgs:
if arg in self.userArgs:
arguments[arg] = self.userArgs[arg]
# these should not be persisted under the Step dictionary
if arg in ("TimePerEvent", "Memory") and arguments["OriginalRequestType"] == "StepChain":
continue
elif arg in ("TimePerEvent", "Memory") and arguments["OriginalRequestType"] == "StepChain":
# there is only the top level argument, reuse it
continue
else:
arguments[arg] = argsDefinition[arg]["default"]
continue
# now update the inner values as well
specType = "Step" if arguments["OriginalRequestType"] == "StepChain" else "Task"
for innerIdx in range(1, arguments.get("{}Chain".format(specType), 0) + 1):
# innerKey is meant to be: Task1 or Step1, Task2 or Step2 ...
innerKey = "{}{}".format(specType, innerIdx)
# the value of either TaskName or StepName
innerName = arguments[innerKey]["{}Name".format(specType)]
# value to be defined inside the Task/Step
if isinstance(self.userArgs[arg], dict):
arguments[innerKey][arg] = self.userArgs[arg][innerName]
else:
arguments[innerKey][arg] = self.userArgs[arg]
def setCoresAndStreams(self, workloadHelper, inputArgs):
"""
Set helper for the Multicore and EventStreams parameters, which
need to be dealt with in a different way depending on the parent
spec type
:param workloadHelper: WMWorkload object
:param inputArgs: dictionary with the Resubmission input args
"""
# simple and easy way to update it
if not isinstance(inputArgs["Multicore"], dict):
workloadHelper.setCoresAndStreams(inputArgs["Multicore"], inputArgs.get("EventStreams", 0))
# still a simple way to update it
elif inputArgs['OriginalRequestType'] == "TaskChain":
workloadHelper.setCoresAndStreams(inputArgs["Multicore"], inputArgs.get("EventStreams", 0))
# check if it's a StepChain then based on its steps mapping
elif workloadHelper.getStepMapping():
# map is supposed to be in the format of:
# {'RecoPU_2021PU': ('Step1', 'cmsRun1'), 'Nano_2021PU': ('Step2', 'cmsRun2')}
stepChainMap = workloadHelper.getStepMapping()
# we need to create an easier map now
coresByCmsRun = {}
evtStreamsByCmsRun = {}
for stepName in stepChainMap:
cores = inputArgs["Multicore"][stepName]
coresByCmsRun[stepChainMap[stepName][1]] = cores
if inputArgs["EventStreams"] and isinstance(inputArgs["EventStreams"], dict):
streams = inputArgs["EventStreams"][stepName]
elif inputArgs["EventStreams"]:
streams = inputArgs["EventStreams"]
else:
streams = 0
evtStreamsByCmsRun[stepChainMap[stepName][1]] = streams
# Now iterate through the tasks and update it from within the steps
for task in workloadHelper.taskIterator():
if task.taskType() in ["Merge", "Harvesting", "Cleanup", "LogCollect"]:
continue
for cmsRunName in coresByCmsRun:
stepHelper = task.getStepHelper(cmsRunName)
stepHelper.setNumberOfCores(coresByCmsRun[cmsRunName],
evtStreamsByCmsRun[cmsRunName])
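# Illustration of the per-cmsRun maps built above (step names and numbers are
# made up): with stepChainMap = {'RecoPU_2021PU': ('Step1', 'cmsRun1'),
# 'Nano_2021PU': ('Step2', 'cmsRun2')} and Multicore = {'RecoPU_2021PU': 8,
# 'Nano_2021PU': 4}, the loop produces coresByCmsRun = {'cmsRun1': 8,
# 'cmsRun2': 4}; evtStreamsByCmsRun is resolved in the same way.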
def validateSchema(self, schema):
"""
Since we skip the master validation for Resubmission specs, we'd better have
some specific validation here.
"""
if schema.get("ResubmissionCount", 1) > 1:
# we cannot validate such schema
return
# load assignment + creation + resubmission creation args definition
argumentDefinition = self.getWorkloadAssignArgs()
parentSpecClass = loadSpecClassByType(schema['OriginalRequestType'])
argumentDefinition.update(parentSpecClass.getWorkloadCreateArgs())
argumentDefinition.update(self.getWorkloadCreateArgs())
try:
validateArgumentsCreate(schema, argumentDefinition)
except Exception as ex:
self.raiseValidationException(str(ex))
# and some extra validation based on the parent workflow
if schema['OriginalRequestType'] != "TaskChain":
for param in ("TimePerEvent", "Memory"):
if isinstance(schema.get(param), dict):
msg = "ACDC for parent spec of type: {} ".format(schema['OriginalRequestType'])
msg += "cannot have parameter: {} defined as a dictionary: {}".format(param,
schema[param])
self.raiseValidationException(msg)
| /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/WMSpec/StdSpecs/Resubmission.py | 0.642432 | 0.196363 | Resubmission.py | pypi |
from builtins import range
from WMCore.DataStructs.Run import Run
class Mask(dict):
"""
_Mask_
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
self.inclusive = True
self.setdefault("inclusivemask", True)
self.setdefault("FirstEvent", None)
self.setdefault("LastEvent", None)
self.setdefault("FirstLumi", None)
self.setdefault("LastLumi", None)
self.setdefault("FirstRun", None)
self.setdefault("LastRun", None)
self.setdefault("runAndLumis", {})
def setMaxAndSkipEvents(self, maxEvents, skipEvents):
"""
_setMaxAndSkipEvents_
Set FirstEvent & LastEvent fields as max & skip events
"""
self['FirstEvent'] = skipEvents
if maxEvents is not None:
self['LastEvent'] = skipEvents + maxEvents
return
def setMaxAndSkipLumis(self, maxLumis, skipLumi):
"""
_setMaxAndSkipLumis_
Set the Maximum number of lumi sections and the starting point
"""
self['FirstLumi'] = skipLumi
self['LastLumi'] = skipLumi + maxLumis
return
def setMaxAndSkipRuns(self, maxRuns, skipRun):
"""
_setMaxAndSkipRuns_
Set the maximum number of runs and the starting point
"""
self['FirstRun'] = skipRun
self['LastRun'] = skipRun + maxRuns
return
def getMaxEvents(self):
"""
_getMaxEvents_
Return the max events setting
"""
if self['LastEvent'] is None or self['FirstEvent'] is None:
return None
return self['LastEvent'] - self['FirstEvent'] + 1
def getMax(self, keyType=None):
"""
_getMax_
Returns the maximum number of runs/events/etc. for the type given by the key type string
"""
if 'First%s' % (keyType) not in self:
return None
if self['First%s' % (keyType)] is None or self['Last%s' % (keyType)] is None:
return None
return self['Last%s' % (keyType)] - self['First%s' % (keyType)] + 1
def addRun(self, run):
"""
_addRun_
Add a run object
"""
run.lumis.sort()
firstLumi = run.lumis[0]
lastLumi = run.lumis[0]
for lumi in run.lumis:
if lumi <= lastLumi + 1:
lastLumi = lumi
else:
self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
firstLumi = lumi
lastLumi = lumi
self.addRunAndLumis(run.run, lumis=[firstLumi, lastLumi])
return
def addRunWithLumiRanges(self, run, lumiList):
"""
_addRunWithLumiRanges_
Add to runAndLumis with call signature
addRunWithLumiRanges(run=run, lumiList=[[start1, end1], [start2, end2], ...])
"""
self['runAndLumis'][run] = lumiList
return
def addRunAndLumis(self, run, lumis=None):
"""
_addRunAndLumis_
Add runs and lumis directly
TODO: The name of this function is a little misleading. If you pass a list of lumis
it ignores the content of the list and adds a range based on the max/min in
the list. Missing lumis in the list are ignored.
NOTE: If the new run/lumi range overlaps with the pre-existing lumi ranges in the
mask, no attempt is made to merge these together. This can result in a mask
with duplicate lumis.
"""
lumis = lumis or []
if not isinstance(lumis, list):
lumis = list(lumis)
if run not in self['runAndLumis']:
self['runAndLumis'][run] = []
self['runAndLumis'][run].append([min(lumis), max(lumis)])
return
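# Illustration of the behaviour described in the note above (values are
# examples only): addRunAndLumis(1, [1, 3, 9]) records the single range
# [[1, 9]] for run 1, and a later overlapping call addRunAndLumis(1, [5, 6])
# simply appends, yielding {1: [[1, 9], [5, 6]]} without any merging.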
def getRunAndLumis(self):
"""
_getRunAndLumis_
Return list of active runs and lumis
"""
return self['runAndLumis']
def runLumiInMask(self, run, lumi):
"""
_runLumiInMask_
See if a particular runLumi is in the mask
"""
if self['runAndLumis'] == {}:
# Empty dictionary
# ALWAYS TRUE
return True
if run not in self['runAndLumis']:
return False
for pair in self['runAndLumis'][run]:
# Go through each max and min pair
if pair[0] <= lumi and pair[1] >= lumi:
# Then the lumi is bracketed
return True
return False
def filterRunLumisByMask(self, runs):
"""
_filterRunLumisByMask_
Pass a Mask a list of run objects, get back a list of
run objects that correspond to the actual mask allowed values
"""
if self['runAndLumis'] == {}:
# Empty dictionary
# ALWAYS TRUE
return runs
runDict = {}
for r in runs:
if r.run in runDict:
runDict[r.run].extendLumis(r.lumis)
else:
runDict[r.run] = r
maskRuns = set(self["runAndLumis"].keys())
passedRuns = set([r.run for r in runs])
filteredRuns = maskRuns.intersection(passedRuns)
newRuns = set()
for runNumber in filteredRuns:
maskLumis = set()
for pair in self["runAndLumis"][runNumber]:
if pair[0] == pair[1]:
maskLumis.add(pair[0])
else:
maskLumis = maskLumis.union(list(range(pair[0], pair[1] + 1)))
filteredLumis = set(runDict[runNumber].lumis).intersection(maskLumis)
if len(filteredLumis) > 0:
filteredLumiEvents = [(lumi, runDict[runNumber].getEventsByLumi(lumi)) for lumi in filteredLumis]
newRuns.add(Run(runNumber, *filteredLumiEvents))
return newRuns
| /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/DataStructs/Mask.py | 0.704567 | 0.254295 | Mask.py | pypi |
from __future__ import absolute_import, division, print_function
from future.utils import listitems
import sys
import hashlib
import time
from functools import total_ordering
from Utils.Utilities import encodeUnicodeToBytes
from WMCore.DataStructs.WMObject import WMObject
@total_ordering
class WorkUnit(WMObject, dict):
"""
_WorkUnit_
Data object that contains details for a single work unit
corresponding to tables workunit and frl_workunit_assoc
"""
fieldsToCopy = ['taskid', 'retry_count', 'last_unit_count', 'last_submit_time', 'status', 'firstevent',
'lastevent', 'fileid']
fieldsForInfo = fieldsToCopy + ['run_lumi']
def __init__(self, taskID=None, retryCount=0, lastUnitCount=None, lastSubmitTime=int(time.time()),
status=0, firstEvent=1, lastEvent=sys.maxsize, fileid=None, runLumi=None):
super(WorkUnit, self).__init__(self)
self.setdefault('taskid', taskID)
self.setdefault('retry_count', retryCount)
self.setdefault('last_unit_count', lastUnitCount)
self.setdefault('last_submit_time', lastSubmitTime)
self.setdefault('status', status)
self.setdefault('firstevent', firstEvent)
self.setdefault('lastevent', lastEvent)
self.setdefault('fileid', fileid)
self.setdefault('run_lumi', runLumi)
def __lt__(self, rhs):
"""
Compare work units in task id, run, lumi, first event, last event
"""
if self['taskid'] != rhs['taskid']:
return self['taskid'] < rhs['taskid']
if self['run_lumi'].run != rhs['run_lumi'].run:
return self['run_lumi'].run < rhs['run_lumi'].run
if self['run_lumi'].lumis != rhs['run_lumi'].lumis:
return self['run_lumi'].lumis < rhs['run_lumi'].lumis
if self['firstevent'] != rhs['firstevent']:
return self['firstevent'] < rhs['firstevent']
return self['lastevent'] < rhs['lastevent']
def __eq__(self, rhs):
"""
Work unit is equal if it has the same task, run, and lumi
"""
return (self['taskid'] == rhs['taskid'] and self['run_lumi'].run == rhs['run_lumi'].run and
self['run_lumi'].lumis == rhs['run_lumi'].lumis and self['firstevent'] == rhs['firstevent'] and
self['lastevent'] == rhs['lastevent'])
def __hash__(self):
"""
Hash function for this dict.
"""
# Generate an immutable sorted string representing this object
# NOTE: the run object needs to be hashed
immutableSelf = []
for keyName in sorted(self):
if keyName == "run_lumi":
immutableSelf.append((keyName, hash(self[keyName])))
else:
immutableSelf.append((keyName, self[keyName]))
hashValue = hashlib.sha1(encodeUnicodeToBytes(str(immutableSelf)))
return int(hashValue.hexdigest()[:15], 16)
def json(self, thunker=None):
"""
_json_
Serialize the object. Only copy select fields and construct one new field.
"""
jsonDict = {k: self[k] for k in WorkUnit.fieldsToCopy}
jsonDict["run_lumi"] = {"run_number": self['run_lumi'].run, "lumis": self['run_lumi'].lumis}
return jsonDict
def __to_json__(self, thunker=None):
"""
__to_json__
This is the standard way we jsonize other objects.
Included here so we have a uniform method.
"""
return self.json(thunker)
def getInfo(self):
"""
Returns: tuple of parameters for the work unit
"""
return tuple(self[x] for x in WorkUnit.fieldsForInfo)
| /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/DataStructs/WorkUnit.py | 0.592195 | 0.15241 | WorkUnit.py | pypi |
from builtins import str, bytes
__all__ = []
from WMCore.DataStructs.Run import Run
from WMCore.DataStructs.WMObject import WMObject
class File(WMObject, dict):
"""
_File_
Data object that contains details for a single file
TODO
- use the decorator `from functools import total_ordering` after
dropping support for python 2.6
- then, drop __ne__, __le__, __gt__, __ge__
"""
def __init__(self, lfn="", size=0, events=0, checksums=None,
parents=None, locations=None, merged=False):
dict.__init__(self)
checksums = checksums or {}
self.setdefault("lfn", lfn)
self.setdefault("size", size)
self.setdefault("events", events)
self.setdefault("checksums", checksums)
self.setdefault('runs', set())
self.setdefault('merged', merged)
self.setdefault('last_event', 0)
self.setdefault('first_event', 0)
if locations is None:
self.setdefault("locations", set())
else:
self.setdefault("locations", locations)
if parents is None:
self.setdefault("parents", set())
else:
self.setdefault("parents", parents)
def addRun(self, run):
"""
_addRun_
run should be an instance of WMCore.DataStructs.Run
Add a run container to this file, tweak the run and lumi
keys to be max run and max lumi for backwards compat.
"""
if not isinstance(run, Run):
msg = "addRun argument must be of type WMCore.DataStructs.Run"
raise RuntimeError(msg)
addFlag = False
for runMember in self['runs']:
if runMember.run == run.run:
# this relies on the Run object overriding __add__ to update itself
runMember + run
addFlag = True
if not addFlag:
self['runs'].add(run)
return
def load(self):
"""
A DataStructs file has nothing to load from, other implementations will
over-ride this method.
"""
if self.get('id'):
self['lfn'] = '/store/testing/%s' % self['id']
def save(self):
"""
A DataStructs file has nothing to save to, other implementations will
over-ride this method.
"""
pass
def setLocation(self, pnn):
# Make sure we don't add None, [], "" as file location
if pnn:
self['locations'] = self['locations'] | set(self.makelist(pnn))
def __eq__(self, rhs):
"""
File is equal if it has the same name
"""
eq = False
if isinstance(rhs, type(self)):
eq = self['lfn'] == rhs['lfn']
elif isinstance(rhs, (str, bytes)):
eq = self['lfn'] == rhs
return eq
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
thisHash = self['lfn'].__hash__()
return thisHash
def __lt__(self, rhs):
"""
Sort files based on lexicographical ordering of the value connected
to the 'lfn' key
"""
eq = False
if isinstance(rhs, type(self)):
eq = self['lfn'] < rhs['lfn']
elif isinstance(rhs, (str, bytes)):
eq = self['lfn'] < rhs
return eq
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __gt__(self, other):
return not self.__le__(other)
def __ge__(self, other):
return not self.__lt__(other)
def json(self, thunker=None):
"""
_json_
Serialize the file object. This will convert all Sets() to lists and
weed out the internal data structures that don't need to be shared.
"""
fileDict = {"last_event": self["last_event"],
"first_event": self["first_event"],
"lfn": self["lfn"],
"locations": list(self["locations"]),
"id": self.get("id", None),
"checksums": self["checksums"],
"events": self["events"],
"merged": self["merged"],
"size": self["size"],
"runs": [],
"parents": []}
for parent in self["parents"]:
if isinstance(parent, (str, bytes)):
# Then for some reason, we're passing strings
# Done specifically for ErrorHandler
fileDict['parents'].append(parent)
elif thunker is None:
continue
else:
fileDict["parents"].append(thunker._thunk(parent))
for run in self["runs"]:
runDict = {"run_number": run.run,
"lumis": run.lumis}
fileDict["runs"].append(runDict)
return fileDict
def __to_json__(self, thunker=None):
"""
__to_json__
This is the standard way we jsonize other objects.
Included here so we have a uniform method.
"""
return self.json(thunker)
| /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/src/python/WMCore/DataStructs/File.py | 0.533884 | 0.174868 | File.py | pypi |
from __future__ import print_function, division
import logging
import sys
from collections import Counter
from future.utils import viewkeys, viewvalues
from WMCore.Services.DBS.DBS3Reader import DBS3Reader
from WMCore.Services.Rucio.Rucio import Rucio
RUCIO_ACCT = "wma_prod"
RUCIO_HOST = "http://cms-rucio.cern.ch"
RUCIO_AUTH = "https://cms-rucio-auth.cern.ch"
DBS_URL = "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader"
def loggerSetup(logLevel=logging.INFO):
"""
Return a logger which writes everything to stdout.
"""
logger = logging.getLogger(__name__)
outHandler = logging.StreamHandler(sys.stdout)
outHandler.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(module)s: %(message)s"))
outHandler.setLevel(logLevel)
logger.addHandler(outHandler)
logger.setLevel(logLevel)
return logger
def getFromRucio(dataset, logger):
"""
Using the WMCore Rucio object and fetch all the blocks and files
for a given container.
Returns a dictionary key'ed by the block name, value is the amount of files.
"""
rucio = Rucio(acct=RUCIO_ACCT,
hostUrl=RUCIO_HOST,
authUrl=RUCIO_AUTH,
configDict={'logger': logger})
result = dict()
for block in rucio.getBlocksInContainer(dataset):
data = rucio.getDID(block)
result.setdefault(block, data['length'])
return result
def getFromDBS(dataset, logger):
"""
Uses the WMCore DBS3Reader object to fetch all the blocks and files
for a given container.
Returns a dictionary key'ed by the block name, and an inner dictionary
with the number of valid and invalid files. It also returns a total counter
for the number of valid and invalid files in the dataset.
"""
dbsReader = DBS3Reader(DBS_URL, logger)
result = dict()
dbsFilesCounter = Counter({'valid': 0, 'invalid': 0})
blocks = dbsReader.listFileBlocks(dataset)
for block in blocks:
data = dbsReader.dbs.listFileArray(block_name=block, validFileOnly=0, detail=True)
result.setdefault(block, Counter({'valid': 0, 'invalid': 0}))
for fileInfo in data:
if fileInfo['is_file_valid'] == 1:
result[block]['valid'] += 1
dbsFilesCounter['valid'] += 1
else:
result[block]['invalid'] += 1
dbsFilesCounter['invalid'] += 1
return result, dbsFilesCounter
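# Hedged illustration (block names and counts are made up) of the structures
# returned by getFromRucio and getFromDBS above: one entry per block, plus a
# dataset-wide valid/invalid counter for DBS.
def _exampleOutputShapes():
    rucioOutput = {"/Primary/Processed-v1/AOD#block-uuid-1": 12}
    dbsOutput = {"/Primary/Processed-v1/AOD#block-uuid-1": Counter({'valid': 10, 'invalid': 2})}
    dbsFilesCounter = Counter({'valid': 10, 'invalid': 2})
    return rucioOutput, dbsOutput, dbsFilesCounter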
def main():
"""
Expects a dataset name as input argument.
It then queries Rucio and DBS and compares their blocks and
number of files.
"""
if len(sys.argv) != 2:
print("A dataset name must be provided in the command line")
sys.exit(1)
datasetName = sys.argv[1]
logger = loggerSetup(logging.INFO)
rucioOutput = getFromRucio(datasetName, logger)
dbsOutput, dbsFilesCounter = getFromDBS(datasetName, logger)
logger.info("*** Dataset: %s", datasetName)
logger.info("Rucio file count : %s", sum(viewvalues(rucioOutput)))
logger.info("DBS file count : %s", dbsFilesCounter['valid'] + dbsFilesCounter['invalid'])
logger.info(" - valid files : %s", dbsFilesCounter['valid'])
logger.info(" - invalid files : %s", dbsFilesCounter['invalid'])
logger.info("Blocks in Rucio but not in DBS: %s", set(viewkeys(rucioOutput)) - set(viewkeys(dbsOutput)))
logger.info("Blocks in DBS but not in Rucio: %s", set(viewkeys(dbsOutput)) - set(viewkeys(rucioOutput)))
for blockname in rucioOutput:
if blockname not in dbsOutput:
logger.error("This block does not exist in DBS: %s", blockname)
continue
if rucioOutput[blockname] != sum(viewvalues(dbsOutput[blockname])):
logger.warning("Block with file mismatch: %s", blockname)
logger.warning("\tRucio: %s\t\tDBS: %s", rucioOutput[blockname], sum(viewvalues(dbsOutput[blockname])))
if __name__ == "__main__":
sys.exit(main())
| /reqmgr2ms-2.1.2rc4.tar.gz/reqmgr2ms-2.1.2rc4/bin/adhoc-scripts/checkDsetFileCount.py | 0.471953 | 0.28517 | checkDsetFileCount.py | pypi |
from textwrap import TextWrapper
from collections import OrderedDict
def twClosure(replace_whitespace=False,
break_long_words=False,
maxWidth=120,
maxLength=-1,
maxDepth=-1,
initial_indent=''):
"""
Deals with indentation of dictionaries with very long key, value pairs.
replace_whitespace: Replace each whitespace character with a single space.
break_long_words: If True words longer than width will be broken.
maxWidth: The maximum length of wrapped lines.
initial_indent: String that will be prepended to the first line of the output
Wraps all strings for both keys and values to 120 chars.
Uses 4 spaces indentation for both keys and values.
Nested dictionaries and lists go to next line.
"""
twr = TextWrapper(replace_whitespace=replace_whitespace,
break_long_words=break_long_words,
width=maxWidth,
initial_indent=initial_indent)
def twEnclosed(obj, ind='', depthReached=0, reCall=False):
"""
The inner function of the closure
ind: Initial indentation for the single output string
reCall: Flag to indicate a recursive call (should not be used outside)
"""
output = ''
if isinstance(obj, dict):
obj = OrderedDict(sorted(list(obj.items()),
key=lambda t: t[0],
reverse=False))
if reCall:
output += '\n'
ind += ' '
depthReached += 1
lengthReached = 0
for key, value in list(obj.items()):
lengthReached += 1
if lengthReached > maxLength and maxLength >= 0:
output += "%s...\n" % ind
break
if depthReached <= maxDepth or maxDepth < 0:
output += "%s%s: %s" % (ind,
''.join(twr.wrap(key)),
twEnclosed(value, ind, depthReached=depthReached, reCall=True))
elif isinstance(obj, (list, set)):
if reCall:
output += '\n'
ind += ' '
lengthReached = 0
for value in obj:
lengthReached += 1
if lengthReached > maxLength and maxLength >= 0:
output += "%s...\n" % ind
break
if depthReached <= maxDepth or maxDepth < 0:
output += "%s%s" % (ind, twEnclosed(value, ind, depthReached=depthReached, reCall=True))
else:
output += "%s\n" % str(obj) # join(twr.wrap(str(obj)))
return output
return twEnclosed
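# Minimal usage sketch of the closure above (data and output are illustrative
# only): dictionary keys are sorted and nested structures move to a new line
# with four extra spaces of indentation per level.
def _exampleTwClosure():
    formatter = twClosure(maxWidth=80)
    data = {"task": "Reco", "steps": {"cmsRun1": ["RAW", "AOD"]}}
    return formatter(data)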
def twPrint(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
"""
A simple caller of twClosure (see docstring for twClosure)
"""
twPrinter = twClosure(maxWidth=maxWidth,
maxLength=maxLength,
maxDepth=maxDepth)
print(twPrinter(obj))
def twFormat(obj, maxWidth=120, maxLength=-1, maxDepth=-1):
"""
A simple caller of twClosure (see docstring for twClosure)
"""
twFormatter = twClosure(maxWidth=maxWidth,
maxLength=maxLength,
maxDepth=maxDepth)
return twFormatter(obj)
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/TwPrint.py | 0.757436 | 0.299387 | TwPrint.py | pypi |
import io
import os
import stat
import subprocess
import time
import zlib
from Utils.Utilities import decodeBytesToUnicode
def calculateChecksums(filename):
"""
_calculateChecksums_
Get the adler32 and crc32 checksums of a file. Return None on error
Process line by line and adjust for known signed vs. unsigned issues
http://docs.python.org/library/zlib.html
The cksum UNIX command line tool implements a CRC32 checksum that is
different than any of the python algorithms, therefore open cksum
in a subprocess and feed it the same chunks of data that are used
to calculate the adler32 checksum.
"""
adler32Checksum = 1 # adler32 of an empty string
cksumProcess = subprocess.Popen("cksum", stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# the lambda basically creates an iterator function with zero
# arguments that steps through the file in 4096 byte chunks
with open(filename, 'rb') as f:
for chunk in iter((lambda: f.read(4096)), b''):
adler32Checksum = zlib.adler32(chunk, adler32Checksum)
cksumProcess.stdin.write(chunk)
cksumProcess.stdin.close()
cksumProcess.wait()
cksumStdout = cksumProcess.stdout.read().split()
cksumProcess.stdout.close()
# consistency check on the cksum output
filesize = os.stat(filename)[stat.ST_SIZE]
if len(cksumStdout) != 2 or int(cksumStdout[1]) != filesize:
raise RuntimeError("Something went wrong with the cksum calculation !")
cksumStdout[0] = decodeBytesToUnicode(cksumStdout[0])
return (format(adler32Checksum & 0xffffffff, '08x'), cksumStdout[0])
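# Hedged usage sketch of calculateChecksums above; it requires the "cksum"
# command line tool in PATH and an existing file on disk (the path below is
# only an example).
def _exampleChecksums(path="/tmp/somefile.txt"):
    adler32, cksum = calculateChecksums(path)
    return {"adler32": adler32, "cksum": cksum}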
def tail(filename, nLines=20):
"""
_tail_
A version of tail
Adapted from code on http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
"""
assert nLines >= 0
pos, lines = nLines + 1, []
# make sure only valid utf8 encoded chars will be passed along
with io.open(filename, 'r', encoding='utf8', errors='ignore') as f:
while len(lines) <= nLines:
try:
f.seek(-pos, 2)
except IOError:
f.seek(0)
break
finally:
lines = list(f)
pos *= 2
text = "".join(lines[-nLines:])
return text
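# Simple usage sketch of tail above: fetch the last few lines of a log file;
# the file name is illustrative only.
def _exampleTail(path="/tmp/wmagent.log"):
    return tail(path, nLines=5)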
def getFileInfo(filename):
"""
_getFileInfo_
Return file info in a friendly format
"""
filestats = os.stat(filename)
fileInfo = {'Name': filename,
'Size': filestats[stat.ST_SIZE],
'LastModification': time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(filestats[stat.ST_MTIME])),
'LastAccess': time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(filestats[stat.ST_ATIME]))}
return fileInfo
def findMagicStr(filename, matchString):
"""
_findMagicStr_
Parse a log file looking for a pattern string
"""
with io.open(filename, 'r', encoding='utf8', errors='ignore') as logfile:
# TODO: can we avoid reading the whole file
for line in logfile:
if matchString in line:
yield line
def getFullPath(name, envPath="PATH"):
"""
:param name: file name
:param envPath: any environment variable specified for path (PATH, PYTHONPATH, etc)
:return: full path if it is under PATH env
"""
for path in os.getenv(envPath).split(os.path.pathsep):
fullPath = os.path.join(path, name)
if os.path.exists(fullPath):
return fullPath
return None
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/FileTools.py | 0.555556 | 0.398641 | FileTools.py | pypi |
from builtins import str, bytes
def portForward(port):
"""
Decorator wrapper function for port forwarding of the REST calls of any
function to a given port.
Currently there are three constraints for applying this decorator.
1. The function to be decorated must be defined within a class and not being a static method.
The reason for that is because we need to be sure the function's signature will
always include the class instance as its first argument.
2. The url argument must be present as the second one in the positional argument list
of the decorated function (right after the class instance argument).
3. The url must follow the syntax specifications in RFC 1808:
https://tools.ietf.org/html/rfc1808.html
If all of the above constraints are fulfilled and the url is part of the
urlMangleList, then the url is parsed and the port is substituted with the
one provided as an argument to the decorator's wrapper function.
param port: The port to which the REST call should be forwarded.
"""
def portForwardDecorator(callFunc):
"""
The actual decorator
"""
def portMangle(callObj, url, *args, **kwargs):
"""
Function used to check if the url coming with the current argument list
is to be forwarded and if so change the port to the one provided as an
argument to the decorator wrapper.
:param callObj: This is the class instance (self from within the class)
which is always to be present in the signature of a
public method. We will never use this argument, but
we need it there for not breaking the positional
argument order
:param url: This is the actual url to be (eventually) forwarded
:param *args: The positional argument list coming from the original function
:param *kwargs: The keyword argument list coming from the original function
"""
forwarded = False
try:
if isinstance(url, str):
urlToMangle = 'https://cmsweb'
if url.startswith(urlToMangle):
newUrl = url.replace('.cern.ch/', '.cern.ch:%d/' % port, 1)
forwarded = True
elif isinstance(url, bytes):
urlToMangle = b'https://cmsweb'
if url.startswith(urlToMangle):
newUrl = url.replace(b'.cern.ch/', b'.cern.ch:%d/' % port, 1)
forwarded = True
except Exception:
pass
if forwarded:
return callFunc(callObj, newUrl, *args, **kwargs)
else:
return callFunc(callObj, url, *args, **kwargs)
return portMangle
return portForwardDecorator
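# Hedged sketch of decorating an instance method with portForward above; the
# client class and URL are illustrative only, a real client would issue the
# HTTP request instead of returning the url.
class _ExamplePortClient(object):
    @portForward(8443)
    def fetch(self, url):
        # return the (possibly port-forwarded) url to show the substitution
        return url
# _ExamplePortClient().fetch('https://cmsweb.cern.ch/reqmgr2/data/info')
# would return 'https://cmsweb.cern.ch:8443/reqmgr2/data/info'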
class PortForward():
"""
A class with a call method implementing a simple way to use the functionality
provided by the protForward decorator as a pure functional call:
EXAMPLE:
from Utils.PortForward import PortForward
portForwarder = PortForward(8443)
url = 'https://cmsweb-testbed.cern.ch/couchdb'
url = portForwarder(url)
"""
def __init__(self, port):
"""
The init method for the PortForward call class. This one is supposed
to simply provide an initial class instance with a logger.
"""
self.port = port
def __call__(self, url):
"""
The call method for the PortForward class
"""
def dummyCall(self, url):
return url
return portForward(self.port)(dummyCall)(self, url)
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/PortForward.py | 0.825273 | 0.496277 | PortForward.py | pypi |
from builtins import object
from functools import reduce
class Functor(object):
"""
A simple functor class used to construct a function call which later to be
applied on an (any type) object.
NOTE:
It expects a function in the constructor and an (any type) object
passed to the run or __call__ methods, which methods once called they
construct and return the following function:
func(obj, *args, **kwargs)
NOTE:
All the additional arguments which the function may take must be set in
the __init__ method. If any of them are passed during run time an error
will be raised.
:func:
The function to which the rest of the constructor arguments are about
to be attached and then the newly created function will be returned.
- The function needs to take at least one parameter since the object
passed to the run/__call__ methods will always be put as a first
argument to the function.
:Example:
def adder(a, b, *args, **kwargs):
if args:
print("adder args: %s" % args)
if kwargs:
print("adder kwargs: %s" % kwargs)
res = a + b
print("adder res: %s" % res)
return res
>>> x=Functor(adder, 8, 'foo', bar=True)
>>> x(2)
adder args: foo
adder kwargs: {'bar': True}
adder res: 10
10
>>> x
<Pipeline.Functor instance at 0x7f319bbaeea8>
"""
def __init__(self, func, *args, **kwargs):
"""
The init method for class Functor
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, obj):
"""
The call method for class Functor
"""
return self.run(obj)
def run(self, obj):
return self.func(obj, *self.args, **self.kwargs)
class Pipeline(object):
"""
A simple Functional Pipeline Class: applies a set of functions to an object,
where the output of every previous function is an input to the next one.
"""
# NOTE:
# Similar and inspiring approaches but yet some different implementations
# are discussed in the following two links [1] & [2]. With a quite good
# explanation in [1], which helped a lot. All in all at the bottom always
# sits the reduce function.
# [1]
# https://softwarejourneyman.com/python-function-pipelines.html
# [2]
# https://gitlab.com/mc706/functional-pipeline
def __init__(self, funcLine=None, name=None):
"""
:funcLine: A list of functions or Functors of function + arguments (see
the Class definition above) that are to be applied sequentially
to the object.
- If any of the elements of 'funcLine' is a function, a direct
function call with the object as an argument is performed.
- If any of the elements of 'funcLine' is a Functor, then the
first argument of the Functor constructor is the function to
be evaluated and the object is passed as a first argument to
the function with all the rest of the arguments passed right
after it eg. the following Functor in the funcLine:
Functor(func, 'foo', bar=True)
will result in the following function call later when the
pipeline is executed:
func(obj, 'foo', bar=True)
:Example:
(using the adder function from above and an object of type int)
>>> pipe = Pipeline([Functor(adder, 5),
Functor(adder, 6),
Functor(adder, 7, "extraArg"),
Functor(adder, 8, update=True)])
>>> pipe.run(1)
adder res: 6
adder res: 12
adder args: extraArg
adder res: 19
adder kwargs: {'update': True}
adder res: 27
"""
self.funcLine = funcLine or []
self.name = name
def getPipelineName(self):
"""
__getPipelineName__
"""
name = self.name or "Unnamed Pipeline"
return name
def run(self, obj):
return reduce(lambda obj, functor: functor(obj), self.funcLine, obj)
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/Pipeline.py | 0.750918 | 0.499512 | Pipeline.py | pypi |
# system modules
import os
import ssl
import time
import logging
import traceback
# third part library
try:
import jwt
except ImportError:
traceback.print_exc()
jwt = None
from Utils.Utilities import encodeUnicodeToBytes
# prevent "SSL: CERTIFICATE_VERIFY_FAILED" error
# this will cause pylint warning W0212, therefore we ignore it above
ssl._create_default_https_context = ssl._create_unverified_context
def readToken(name=None):
"""
Read IAM token either from environment or file name
:param name: either a file name containing the token or an environment variable name which holds the token value.
If not provided it will be assumed to read token from IAM_TOKEN environment.
:return: token or None
"""
if name and os.path.exists(name):
token = None
with open(name, 'r', encoding='utf-8') as istream:
token = istream.read()
return token
if name:
return os.environ.get(name)
return os.environ.get("IAM_TOKEN")
def tokenData(token, url="https://cms-auth.web.cern.ch/jwk", audUrl="https://wlcg.cern.ch/jwt/v1/any"):
"""
inspect and extract token data
:param token: token string
:param url: IAM provider URL
:param audUrl: audience string
"""
if not token or not jwt:
return {}
if isinstance(token, str):
token = encodeUnicodeToBytes(token)
jwksClient = jwt.PyJWKClient(url)
signingKey = jwksClient.get_signing_key_from_jwt(token)
key = signingKey.key
headers = jwt.get_unverified_header(token)
alg = headers.get('alg', 'RS256')
data = jwt.decode(
token,
key,
algorithms=[alg],
audience=audUrl,
options={"verify_exp": True},
)
return data
def isValidToken(token):
"""
check if given token is valid or not
:param token: token string
:return: true or false
"""
tokenDict = {}
tokenDict = tokenData(token)
exp = tokenDict.get('exp', 0) # expire, seconds since epoch
if not exp or exp < time.time():
return False
return True
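# Hedged usage sketch of the helpers above; it assumes a token is available
# via the IAM_TOKEN environment variable (or a file name passed to readToken)
# and that the PyJWT dependency is installed.
def _exampleTokenCheck():
    token = readToken()  # defaults to the IAM_TOKEN environment variable
    return token is not None and isValidToken(token)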
class TokenManager():
"""
TokenManager class handles IAM tokens
"""
def __init__(self,
name=None,
url="https://cms-auth.web.cern.ch/jwk",
audUrl="https://wlcg.cern.ch/jwt/v1/any",
logger=None):
"""
Token manager reads IAM tokens either from file or env.
It caches token along with expiration timestamp.
By default the env variable to use is IAM_TOKEN.
:param name: string representing either file or env where we should read token from
:param url: IAM provider URL
:param audUrl: audience string
:param logger: logger object or none to use default one
"""
self.name = name
self.url = url
self.audUrl = audUrl
self.expire = 0
self.token = None
self.logger = logger if logger else logging.getLogger()
try:
self.token = self.getToken()
except Exception as exc:
self.logger.exception("Failed to get token. Details: %s", str(exc))
def getToken(self):
"""
Return valid token and sets its expire timestamp
"""
if not self.token or not isValidToken(self.token):
self.token = readToken(self.name)
tokenDict = {}
try:
tokenDict = tokenData(self.token, url=self.url, audUrl=self.audUrl)
self.logger.debug(tokenDict)
except Exception as exc:
self.logger.exception(str(exc))
raise
self.expire = tokenDict.get('exp', 0)
return self.token
def getLifetime(self):
"""
Return the remaining lifetime of the existing token
"""
return self.expire - int(time.time())
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/TokenManager.py | 0.66061 | 0.165863 | TokenManager.py | pypi |
from copy import copy
from builtins import object
from time import time
class MemoryCacheException(Exception):
def __init__(self, message):
super(MemoryCacheException, self).__init__(message)
class MemoryCache():
__slots__ = ["lastUpdate", "expiration", "_cache"]
def __init__(self, expiration, initialData=None):
"""
Initializes cache object
:param expiration: expiration time in seconds
:param initialData: initial value for the cache
"""
self.lastUpdate = int(time())
self.expiration = expiration
self._cache = initialData
def __contains__(self, item):
"""
Check whether item is in the current cache
:param item: a simple object (string, integer, etc)
:return: True if the object can be found in the cache, False otherwise
"""
return item in self._cache
def __getitem__(self, keyName):
"""
If the cache is a dictionary, return that item from the cache. Else, raise an exception.
:param keyName: the key name from the dictionary
"""
if isinstance(self._cache, dict):
return copy(self._cache.get(keyName))
else:
raise MemoryCacheException("Cannot retrieve an item from a non-dict MemoryCache object: {}".format(self._cache))
def reset(self):
"""
Resets the cache to its current data type
"""
if isinstance(self._cache, (dict, set)):
self._cache.clear()
elif isinstance(self._cache, list):
del self._cache[:]
else:
raise MemoryCacheException("The cache needs to be reset manually, data type unknown")
def isCacheExpired(self):
"""
Evaluate whether the cache has already expired, returning
True if it did, otherwise it returns False
"""
return self.lastUpdate + self.expiration < int(time())
def getCache(self):
"""
Raises an exception if the cache has expired, otherwise returns
its data
"""
if self.isCacheExpired():
expiredSince = int(time()) - (self.lastUpdate + self.expiration)
raise MemoryCacheException("Memory cache expired for %d seconds" % expiredSince)
return self._cache
def setCache(self, inputData):
"""
Refresh the cache with the content provided (refresh its expiration as well)
This method enforces the user to not change the cache data type
:param inputData: data to store in the cache
"""
if not isinstance(self._cache, type(inputData)):
raise TypeError("Current cache data type: %s, while new value is: %s" %
(type(self._cache), type(inputData)))
self.reset()
self.lastUpdate = int(time())
self._cache = inputData
def addItemToCache(self, inputItem):
"""
Adds new item(s) to the cache, without resetting its expiration.
It, of course, only works for data caches of type: list, set or dict.
:param inputItem: additional item to be added to the current cached data
"""
if isinstance(self._cache, set) and isinstance(inputItem, (list, set)):
# extend another list or set into a set
self._cache.update(inputItem)
elif isinstance(self._cache, set) and isinstance(inputItem, (int, float, str)):
# add a simple object (integer, string, etc) to a set
self._cache.add(inputItem)
elif isinstance(self._cache, list) and isinstance(inputItem, (list, set)):
# extend another list or set into a list
self._cache.extend(inputItem)
elif isinstance(self._cache, list) and isinstance(inputItem, (int, float, str)):
# add a simple object (integer, string, etc) to a list
self._cache.append(inputItem)
elif isinstance(self._cache, dict) and isinstance(inputItem, dict):
self._cache.update(inputItem)
else:
msg = "Input item type: %s cannot be added to a cache type: %s" % (type(self._cache), type(inputItem))
raise TypeError("Cache and input item data type mismatch. %s" % msg) | /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/MemoryCache.py | 0.774796 | 0.226185 | MemoryCache.py | pypi |
from builtins import object
import logging
import time
import calendar
from datetime import tzinfo, timedelta
def gmtimeSeconds():
"""
Return GMT time in seconds
"""
return int(time.mktime(time.gmtime()))
def encodeTimestamp(secs):
"""
Encode second since epoch to a string GMT timezone representation
:param secs: input timestamp value (either int or float) in seconds since epoch
:return: time string in GMT timezone representation
"""
if not isinstance(secs, (int, float)):
raise Exception("Wrong input, should be seconds since epoch either int or float value")
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(int(secs)))
def decodeTimestamp(timeString):
"""
Decode timestamps in provided document
:param timeString: timestamp string represention in GMT timezone, see encodeTimestamp
:return: seconds since ecouch in GMT timezone
"""
if not isinstance(timeString, str):
raise Exception("Wrong input, should be time string in GMT timezone representation")
return calendar.timegm(time.strptime(timeString, "%Y-%m-%dT%H:%M:%SZ"))
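# Small round-trip sketch of the two helpers above: seconds since epoch to a
# GMT timestamp string and back again.
def _exampleTimestampRoundTrip():
    secs = gmtimeSeconds()
    stamp = encodeTimestamp(secs)          # e.g. "2021-01-01T00:00:00Z"
    return decodeTimestamp(stamp) == secs  # True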
def timeFunction(func):
"""
source: https://www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
Decorator function to measure how long a method/function takes to run
It returns a tuple with:
* wall clock time spent
* returned result of the function
* the function name
"""
def wrapper(*arg, **kw):
t1 = time.time()
res = func(*arg, **kw)
t2 = time.time()
return round((t2 - t1), 4), res, func.__name__
return wrapper
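# Usage sketch of the timeFunction decorator above: the wrapped call returns a
# (elapsed seconds, result, function name) tuple; the function is illustrative.
@timeFunction
def _exampleSquare(x):
    return x * x
# _exampleSquare(3) would return something like (0.0001, 9, '_exampleSquare')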
class CodeTimer(object):
"""
A context manager for timing function calls.
Adapted from https://www.blog.pythonlibrary.org/2016/05/24/python-101-an-intro-to-benchmarking-your-code/
Use like
with CodeTimer(label='Doing something'):
do_something()
"""
def __init__(self, label='The function', logger=None):
self.start = time.time()
self.label = label
self.logger = logger or logging.getLogger()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
end = time.time()
runtime = round((end - self.start), 3)
self.logger.info(f"{self.label} took {runtime} seconds to complete")
class LocalTimezone(tzinfo):
"""
A required python 2 class to determine current timezone for formatting rfc3339 timestamps
Required for sending alerts to the MONIT AlertManager
Can be removed once WMCore starts using python3
Details of class can be found at: https://docs.python.org/2/library/datetime.html#tzinfo-objects
"""
def __init__(self):
super(LocalTimezone, self).__init__()
self.ZERO = timedelta(0)
self.STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
self.DSTOFFSET = timedelta(seconds=-time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return self.ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/Timers.py | 0.817028 | 0.254266 | Timers.py | pypi |
import copy
import unittest
class ExtendedUnitTestCase(unittest.TestCase):
"""
Class extending unittest.TestCase with additional assertion helpers for
order-insensitive comparison of nested containers.
"""
def assertContentsEqual(self, expected_obj, actual_obj, msg=None):
"""
A nested object comparison without regard for the ordering of contents. It asserts that
expected_obj and actual_obj contain the same elements and that their sub-elements are the same.
However, all sequences are allowed to contain the same elements, but in different orders.
"""
def traverse_dict(dictionary):
for key, value in list(dictionary.items()):
if isinstance(value, dict):
traverse_dict(value)
elif isinstance(value, list):
traverse_list(value)
return
def get_dict_sortkey(x):
if isinstance(x, dict):
return list(x.keys())
else:
return x
def traverse_list(theList):
for value in theList:
if isinstance(value, dict):
traverse_dict(value)
elif isinstance(value, list):
traverse_list(value)
theList.sort(key=get_dict_sortkey)
return
if not isinstance(expected_obj, type(actual_obj)):
self.fail(msg="The two objects are different type and cannot be compared: %s and %s" % (
type(expected_obj), type(actual_obj)))
expected = copy.deepcopy(expected_obj)
actual = copy.deepcopy(actual_obj)
if isinstance(expected, dict):
traverse_dict(expected)
traverse_dict(actual)
elif isinstance(expected, list):
traverse_list(expected)
traverse_list(actual)
else:
self.fail(msg="The two objects are different type (%s) and cannot be compared." % type(expected_obj))
return self.assertEqual(expected, actual)
| /reqmon-2.2.4rc3.tar.gz/reqmon-2.2.4rc3/src/python/Utils/ExtendedUnitTestCase.py | 0.664758 | 0.501587 | ExtendedUnitTestCase.py | pypi |