func_code_string stringlengths 52 1.94M | func_documentation_string stringlengths 1 47.2k |
|---|---|
def re_escape(pattern, chars=frozenset("()[]{}?*+|^$\\.-#")):
    """Escape all special regex characters in pattern.

    Logic taken from the regex module. Whitespace characters are escaped
    too, and NUL is rewritten as the octal escape ``\\000``.

    :param pattern: regex pattern to escape
    :type pattern: str
    :returns: escaped pattern
    :rtype: str
    """
    escaped = []
    for char in pattern:
        if char in chars or char.isspace():
            escaped.append('\\' + char)
        elif char == '\x00':
            escaped.append('\\000')
        else:
            escaped.append(char)
    return ''.join(escaped)
def register_plugin(manager):
manager.register_blueprint(player)
manager.register_mimetype_function(detect_playable_mimetype)
# add style tag
manager.register_widget(
place='styles',
type='stylesheet',
endpoint='player.static',
filename='css/browse.css'
)
# r... | Register blueprints and actions using given plugin manager.
:param manager: plugin manager
:type manager: browsepy.manager.PluginManager |
def fmt_size(size, binary=True):
if binary:
fmt_sizes = binary_units
fmt_divider = 1024.
else:
fmt_sizes = standard_units
fmt_divider = 1000.
for fmt in fmt_sizes[:-1]:
if size < 1000:
return (size, fmt)
size /= fmt_divider
return size, fm... | Get size and unit.
:param size: size in bytes
:type size: int
:param binary: whether use binary or standard units, defaults to True
:type binary: bool
:return: size and unit
:rtype: tuple of int and unit as str |
def relativize_path(path, base, os_sep=os.sep):
    """Make absolute path relative to an absolute base.

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path component separator, defaults to current OS separator
    :type os_sep: str
    :return: relative path
    :rtype: str or unicode
    :raises OutsideDirectoryBase: if path is not under base
    """
    if not check_base(path, base, os_sep):
        raise OutsideDirectoryBase("%r is not under %r" % (path, base))
    # Skip the base plus (when base lacks it) one trailing separator.
    if base.endswith(os_sep):
        skip = len(base)
    else:
        skip = len(base) + len(os_sep)
    return path[skip:]
def abspath_to_urlpath(path, base, os_sep=os.sep):
    """Make filesystem absolute path uri-relative using given absolute base.

    :param path: absolute path
    :param base: absolute base path
    :param os_sep: path component separator, defaults to current OS separator
    :return: relative uri
    :rtype: str or unicode
    :raises OutsideDirectoryBase: if path is not under base
    """
    relative = relativize_path(path, base, os_sep)
    return relative.replace(os_sep, '/')
def urlpath_to_abspath(path, base, os_sep=os.sep):
prefix = base if base.endswith(os_sep) else base + os_sep
realpath = os.path.abspath(prefix + path.replace('/', os_sep))
if check_path(base, realpath) or check_under_base(realpath, base):
return realpath
raise OutsideDirectoryBase("%r is no... | Make uri relative path fs absolute using a given absolute base path.
:param path: relative path
:param base: absolute base path
:param os_sep: path component separator, defaults to current OS separator
:return: absolute path
:rtype: str or unicode
:raises OutsideDirectoryBase: if resulting path... |
def generic_filename(path):
    """Extract filename of given path os-independently, taking care of the
    known path separators.

    :param path: path
    :return: filename
    :rtype: str or unicode (depending on given path)
    """
    for separator in common_path_separators:
        if separator in path:
            # Keep only what follows the last occurrence of this separator.
            path = path.rsplit(separator, 1)[-1]
    return path
def clean_restricted_chars(path, restricted_chars=restricted_chars):
    """Get path without restricted characters.

    Each restricted character is replaced by an underscore.

    :param path: path
    :return: path without restricted characters
    :rtype: str or unicode (depending on given path)
    """
    cleaned = path
    for forbidden in restricted_chars:
        cleaned = cleaned.replace(forbidden, '_')
    return cleaned
def check_forbidden_filename(filename,
                             destiny_os=os.name,
                             restricted_names=restricted_names):
    """Get if given filename is forbidden for current OS or filesystem.

    :param filename: filename to check
    :param destiny_os: destination operative system
    :param restricted_names: collection of always-forbidden names
    :return: whether filename is forbidden on given OS (or filesystem) or not
    :rtype: bool
    """
    if filename in restricted_names:
        return True
    if destiny_os != 'nt':
        return False
    # On Windows, device names (CON, NUL, ...) are reserved even with an
    # extension, so only the part before the first dot is checked.
    root = filename.split('.', 1)[0]
    return root.upper() in nt_device_names
def check_path(path, base, os_sep=os.sep):
    """Check if both given paths are equal.

    Trailing separators on base are ignored and the comparison is done on
    case-normalized forms (``os.path.normcase``).

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path separator, defaults to os.sep
    :type os_sep: str
    :return: whether the two paths are equal or not
    :rtype: bool
    """
    if base.endswith(os_sep):
        base = base[:-len(os_sep)]
    return os.path.normcase(path) == os.path.normcase(base)
def check_base(path, base, os_sep=os.sep):
    """Check if given absolute path equals or is under the given base.

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path separator, defaults to os.sep
    :return: whether path equals or is under given base or not
    :rtype: bool
    """
    if check_path(path, base, os_sep):
        return True
    return check_under_base(path, base, os_sep)
def check_under_base(path, base, os_sep=os.sep):
    """Check if given absolute path is strictly under given base.

    The check is a case-normalized prefix test against ``base`` plus a
    trailing separator, so equal paths are NOT considered "under".

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path separator, defaults to os.sep
    :return: whether path is under given base or not
    :rtype: bool
    """
    if not base.endswith(os_sep):
        base += os_sep
    return os.path.normcase(path).startswith(os.path.normcase(base))
def secure_filename(path, destiny_os=os.name, fs_encoding=compat.FS_ENCODING):
path = generic_filename(path)
path = clean_restricted_chars(
path,
restricted_chars=(
nt_restricted_chars
if destiny_os == 'nt' else
restricted_chars
))
path = ... | Get rid of parent path components and special filenames.
If path is invalid or protected, return empty string.
:param path: unsafe path, only basename will be used
:type: str
:param destiny_os: destination operative system (defaults to os.name)
:type destiny_os: str
:param fs_encoding: fs path... |
def alternative_filename(filename, attempt=None):
filename_parts = filename.rsplit(u'.', 2)
name = filename_parts[0]
ext = ''.join(u'.%s' % ext for ext in filename_parts[1:])
if attempt is None:
choose = random.choice
extra = u' %s' % ''.join(choose(fs_safe_characters) for i in rang... | Generates an alternative version of given filename.
If an number attempt parameter is given, will be used on the alternative
name, a random value will be used otherwise.
:param filename: original filename
:param attempt: optional attempt number, defaults to null
:return: new filename
:rtype: s... |
def scandir(path, app=None):
    """Config-aware scandir. Currently, only aware of ``exclude_fnc``.

    :param path: absolute path
    :type path: str
    :param app: flask application
    :type app: flask.Flask or None
    :returns: filtered scandir entries
    :rtype: iterator
    """
    entries = compat.scandir(path)
    exclude = app.config.get('exclude_fnc') if app else None
    if not exclude:
        return entries
    return (entry for entry in entries if not exclude(entry.path))
def is_excluded(self):
    """Get if current node shouldn't be shown, using :attr:`app` config's
    exclude_fnc.

    :returns: truthy if excluded, falsy otherwise (may be None when no
        app or no exclude_fnc is configured)
    """
    exclude_fnc = self.app and self.app.config['exclude_fnc']
    if not exclude_fnc:
        return exclude_fnc
    return exclude_fnc(self.path)
def widgets(self):
widgets = []
if self.can_remove:
widgets.append(
self.plugin_manager.create_widget(
'entry-actions',
'button',
file=self,
css='remove',
endpoint='re... | List widgets with filter return True for this node (or without filter).
Remove button is prepended if :property:can_remove returns true.
:returns: list of widgets
:rtype: list of namedtuple instances |
def link(self):
    """Get last widget with place "entry-link".

    :returns: widget on entry-link (ideally a link one), or None
    :rtype: namedtuple instance or None
    """
    found = None
    # Single pass keeping the most recent match, so the LAST matching
    # widget wins (as callers expect).
    for candidate in self.widgets:
        if candidate.place == 'entry-link':
            found = candidate
    return found
def can_remove(self):
    """Get if current node can be removed based on app config's
    directory_remove.

    :returns: True if current node can be removed, False otherwise.
    :rtype: bool
    """
    dirbase = self.app.config["directory_remove"]
    if not dirbase:
        return False
    return bool(check_under_base(self.path, dirbase))
def parent(self):
    """Get parent node if available based on app config's directory_base.

    :returns: parent object if available
    :rtype: Node instance or None
    """
    # The base directory itself has no parent.
    if check_path(self.path, self.app.config['directory_base']):
        return None
    if not self.path:
        return None
    parent_path = os.path.dirname(self.path)
    if not parent_path:
        return None
    return self.directory_class(parent_path, self.app)
def ancestors(self):
    """Get list of ancestors until app config's directory_base is reached.

    :returns: list of ancestors starting from nearest.
    :rtype: list of Node objects
    """
    chain = []
    node = self.parent
    while node:
        chain.append(node)
        node = node.parent
    return chain
def modified(self):
    """Get human-readable last modification date-time.

    :returns: iso9008-like date-time string (without timezone), or None
        when the underlying stat call fails
    :rtype: str or None
    """
    try:
        # self.stats may raise OSError (e.g. vanished file), hence the guard.
        mtime = self.stats.st_mtime
        when = datetime.datetime.fromtimestamp(mtime)
        return when.strftime('%Y.%m.%d %H:%M:%S')
    except OSError:
        return None
def from_urlpath(cls, path, app=None):
app = app or current_app
base = app.config['directory_base']
path = urlpath_to_abspath(path, base)
if not cls.generic:
kls = cls
elif os.path.isdir(path):
kls = cls.directory_class
else:
k... | Alternative constructor which accepts a path as taken from URL and uses
the given app or the current app config to get the real path.
If class has attribute `generic` set to True, `directory_class` or
`file_class` will be used as type.
:param path: relative path as from URL
:pa... |
def _find_paths(self, current_dir, patterns):
pattern = patterns[0]
patterns = patterns[1:]
has_wildcard = is_pattern(pattern)
using_globstar = pattern == "**"
# This avoids os.listdir() for performance
if has_wildcard:
entries = [x.name for x in scan... | Recursively generates absolute paths whose components
underneath current_dir match the corresponding pattern in
patterns |
def union_overlapping(intervals):
disjoint_intervals = []
for interval in intervals:
if disjoint_intervals and disjoint_intervals[-1].overlaps(interval):
disjoint_intervals[-1] = disjoint_intervals[-1].union(interval)
else:
disjoint_intervals.append(interval)
ret... | Union any overlapping intervals in the given set. |
def recurse(query, index):
    """Recursively walk across paths, adding leaves to the index as they're
    found.

    :param query: store query pattern (uses module-level ``app``)
    :param index: set-like collector of leaf paths
    """
    for node in app.store.find(query):
        if not node.is_leaf:
            # Descend one level: match all children of this branch node.
            recurse('{0}.*'.format(node.path), index)
        else:
            index.add(node.path)
def setAggregationMethod(path, aggregationMethod, xFilesFactor=None):
fh = None
try:
fh = open(path,'r+b')
if LOCK:
fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
packedMetadata = fh.read(metadataSize)
try:
(aggregationType,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,pack... | setAggregationMethod(path,aggregationMethod,xFilesFactor=None)
path is a string
aggregationMethod specifies the method to use when propagating data (see ``whisper.aggregationMethods``)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur. ... |
def validateArchiveList(archiveList):
if not archiveList:
raise InvalidConfiguration("You must specify at least one archive configuration!")
archiveList = sorted(archiveList, key=lambda a: a[0]) #sort by precision (secondsPerPoint)
for i,archive in enumerate(archiveList):
if i == len(archiveList) - 1:
... | Validates an archiveList.
An ArchiveList must:
1. Have at least one archive config. Example: (60, 86400)
2. No archive may be a duplicate of another.
3. Higher precision archives' precision must evenly divide all lower precision archives' precision.
4. Lower precision archives must cover larger time intervals... |
def create(path,archiveList,xFilesFactor=None,aggregationMethod=None,sparse=False,useFallocate=False):
# Set default params
if xFilesFactor is None:
xFilesFactor = 0.5
if aggregationMethod is None:
aggregationMethod = 'average'
#Validate archive configurations...
validateArchiveList(archiveList)
... | create(path,archiveList,xFilesFactor=0.5,aggregationMethod='average')
path is a string
archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
agg... |
def update(path, value, timestamp=None):
    """update(path,value,timestamp=None)

    Write a single datapoint to a whisper file.

    :param path: whisper file path (str)
    :param value: datapoint value, coerced with ``float()``
    :param timestamp: epoch time (int or float) or None; passed through
        to ``file_update``
    :returns: result of ``file_update``
    """
    value = float(value)
    # ``with`` closes the handle on every path, replacing the manual
    # try/finally/close bookkeeping.
    with open(path, 'r+b') as fh:
        return file_update(fh, value, timestamp)
def update_many(path, points):
    """update_many(path,points)

    Write many datapoints to a whisper file.

    :param path: whisper file path (str)
    :param points: list of ``(timestamp, value)`` pairs
    :returns: result of ``file_update_many``, or None when points is empty
    """
    if not points:
        return
    # Normalize types and order points by timestamp, newest first.
    datapoints = [(int(t), float(v)) for (t, v) in points]
    datapoints.sort(key=lambda p: p[0], reverse=True)
    # ``with`` guarantees the handle is closed even if the write raises.
    with open(path, 'r+b') as fh:
        return file_update_many(fh, datapoints)
def info(path):
    """info(path)

    Read and return the parsed header of a whisper file.

    :param path: whisper file path (str)
    :returns: result of ``__readHeader`` on the opened file
    """
    # The unreachable ``return None`` that followed the original
    # try/finally has been dropped; ``with`` handles the close.
    with open(path, 'rb') as fh:
        return __readHeader(fh)
def fetch(path, fromTime, untilTime=None, now=None):
    """fetch(path,fromTime,untilTime=None)

    :param path: whisper file path (str)
    :param fromTime: epoch start time
    :param untilTime: epoch end time, defaults to now
    :param now: current-time override passed through to ``file_fetch``
    :returns: a ``(timeInfo, valueList)`` tuple where timeInfo is itself a
        tuple of ``(fromTime, untilTime, step)``, or None if no data can
        be returned (as produced by ``file_fetch``)
    """
    # ``with`` closes the handle on every path, replacing try/finally.
    with open(path, 'rb') as fh:
        return file_fetch(fh, fromTime, untilTime, now)
def __archive_fetch(fh, archive, fromTime, untilTime):
fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint']
untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint']
if fromInterval == untilInterval:
# Zero-leng... | Fetch data from a single archive. Note that checks for validity of the time
period requested happen above this level so it's possible to wrap around the
archive on a read and request data older than the archive's retention |
def merge(path_from, path_to):
    """Merges the data from one whisper file into another. Each file must
    have the same archive configuration.

    :param path_from: source whisper file path (opened read-only)
    :param path_to: destination whisper file path (opened read/write)
    :returns: result of ``file_merge``
    """
    # Bug fix: the original opened both files and never closed them,
    # leaking two file descriptors per call. Context managers close them
    # on success and on error alike.
    with open(path_from, 'rb') as fh_from, open(path_to, 'rb+') as fh_to:
        return file_merge(fh_from, fh_to)
def diff(path_from, path_to, ignore_empty=False):
    """Compare two whisper databases. Each file must have the same archive
    configuration.

    :param path_from: first whisper file path
    :param path_to: second whisper file path
    :param ignore_empty: passed through to ``file_diff``
    :returns: result of ``file_diff``
    """
    # Bug fix: the original closed the handles only on the success path;
    # if file_diff raised, both descriptors leaked. ``with`` closes them
    # unconditionally.
    with open(path_from, 'rb') as fh_from, open(path_to, 'rb') as fh_to:
        return file_diff(fh_from, fh_to, ignore_empty)
def add_data(self, path, time_info, data, exprs):
# Dont add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
... | Stores data before it can be put into a time series |
def extract_variants(pattern):
    """Extract the pattern variants (ie. {foo,bar}baz = foobaz or barbaz)."""
    start = pattern.find('{')
    end = pattern.find('}')
    if start > -1 and end > start:
        prefix = pattern[:start]
        suffix = pattern[end + 1:]
        options = pattern[start + 1:end].split(',')
        variants = [prefix + option + suffix for option in options]
    else:
        # No brace group: the pattern is its own single variant.
        variants = [pattern]
    return list(_deduplicate(variants))
def match_entries(entries, pattern):
    """A drop-in replacement for fnmatch.filter that supports pattern
    variants (ie. {foo,bar}baz = foobaz or barbaz)."""
    matches = []
    for variant in expand_braces(pattern):
        matches += fnmatch.filter(entries, variant)
    return list(_deduplicate(matches))
def expand_braces(pattern):
res = set()
# Used instead of s.strip('{}') because strip is greedy.
# We want to remove only ONE leading { and ONE trailing }, if both exist
def remove_outer_braces(s):
if s[0] == '{' and s[-1] == '}':
return s[1:-1]
return s
match = EXPA... | Find the rightmost, innermost set of braces and, if it contains a
comma-separated list, expand its contents recursively (any of its items
may itself be a list enclosed in braces).
Return the full list of expanded strings. |
def select_host(self, metric):
key = self.keyfunc(metric)
nodes = []
servers = set()
for node in self.hash_ring.get_nodes(key):
server, instance = node
if server in servers:
continue
servers.add(server)
nodes.append... | Returns the carbon host that has data for the given metric. |
def safeArgs(args):
    """Iterate over valid, finite values in an iterable.

    Skip any items that are None, NaN, or infinite.
    """
    # math.isfinite is equivalent to "not isnan and not isinf".
    return (value for value in args
            if value is not None and math.isfinite(value))
def dataLimits(data, drawNullAsZero=False, stacked=False):
missingValues = any(None in series for series in data)
finiteData = [series for series in data
if not series.options.get('drawAsInfinite')]
yMinValue = safeMin(safeMin(series) for series in finiteData)
if yMinValue is None... | Return the range of values in data as (yMinValue, yMaxValue).
data is an array of TimeSeries objects. |
def format_units(v, step=None, system="si", units=None):
if v is None:
return 0, ''
for prefix, size in UnitSystems[system]:
if condition(v, size, step):
v2 = v / size
if v2 - math.floor(v2) < 0.00000000001 and v > 1:
v2 = float(math.floor(v2))
... | Format the given value in standardized units.
``system`` is either 'binary' or 'si'
For more info, see:
http://en.wikipedia.org/wiki/SI_prefix
http://en.wikipedia.org/wiki/Binary_prefix |
def checkFinite(value, name='value'):
    """Check that value is a finite number.

    If it is, return it. If not, raise GraphError describing the
    problem, using name in the error message.
    """
    if math.isinf(value):
        raise GraphError('Encountered infinite %s' % (name,))
    if math.isnan(value):
        raise GraphError('Encountered NaN %s' % (name,))
    return value
def reconcileLimits(self):
if self.minValue < self.maxValue:
# The limits are already OK.
return
minFixed = (self.minValueSource in ['min'])
maxFixed = (self.maxValueSource in ['max', 'limit'])
if minFixed and maxFixed:
raise GraphError('The %... | If self.minValue is not less than self.maxValue, fix the problem.
If self.minValue is not less than self.maxValue, adjust
self.minValue and/or self.maxValue (depending on which was not
specified explicitly by the user) to make self.minValue <
self.maxValue. If the user specified both li... |
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None):
if axisMin is not None and not math.isnan(axisMin):
self.minValueSource = 'min'
self.minValue = self.checkFinite(axisMin, 'axis min')
if axisMax == 'max':
self.maxValueSource = 'extremum'
... | Apply the specified settings to this axis.
Set self.minValue, self.minValueSource, self.maxValue,
self.maxValueSource, and self.axisLimit reasonably based on the
parameters provided.
Arguments:
axisMin -- a finite number, or None to choose a round minimum
limit tha... |
def makeLabel(self, value):
value, prefix = format_units(value, self.step,
system=self.unitSystem)
span, spanPrefix = format_units(self.span, self.step,
system=self.unitSystem)
if prefix:
prefix += ... | Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem. |
def generateSteps(self, minStep):
self.checkFinite(minStep)
if self.binary:
base = 2.0
mantissas = [1.0]
exponent = math.floor(math.log(minStep, 2) - EPSILON)
else:
base = 10.0
mantissas = [1.0, 2.0, 5.0]
exponent =... | Generate allowed steps with step >= minStep in increasing order. |
def computeSlop(self, step, divisor):
    """Compute the slop that would result from step and divisor.

    Return the slop, or None if this combination can't cover the full
    range. See chooseStep() for the definition of "slop".
    """
    # Snap the bottom of the axis down to a multiple of step (EPSILON
    # absorbs floating-point noise), then see how far `divisor` steps reach.
    bottom = step * math.floor(self.minValue / float(step) + EPSILON)
    top = bottom + step * divisor
    if top < self.maxValue - EPSILON * step:
        return None
    return max(top - self.maxValue, self.minValue - bottom)
def chooseStep(self, divisors=None, binary=False):
self.binary = binary
if divisors is None:
divisors = [4, 5, 6]
else:
for divisor in divisors:
self.checkFinite(divisor, 'divisor')
if divisor < 1:
raise GraphEr... | Choose a nice, pretty size for the steps between axis labels.
Our main constraint is that the number of divisions must be taken
from the divisors list. We pick a number of divisions and a step
size that minimizes the amount of whitespace ("slop") that would
need to be included outside o... |
def formatPathExpressions(seriesList):
    """Returns a comma-separated list of unique path expressions."""
    unique = {series.pathExpression for series in seriesList}
    return ','.join(sorted(unique))
def sumSeries(requestContext, *seriesLists):
if not seriesLists or not any(seriesLists):
return []
seriesList, start, end, step = normalize(seriesLists)
name = "sumSeries(%s)" % formatPathExpressions(seriesList)
values = (safeSum(row) for row in zip_longest(*seriesList))
series = TimeSe... | Short form: sum()
This will add metrics together and return the sum at each datapoint. (See
integral for a sum over time)
Example::
&target=sum(company.server.application*.requestsHandled)
This would show the sum of all requests handled per minute (provided
requestsHandled are collected ... |
def sumSeriesWithWildcards(requestContext, seriesList, *positions):
newSeries = {}
newNames = list()
for series in seriesList:
newname = '.'.join(map(lambda x: x[1],
filter(lambda i: i[0] not in positions,
enumerate(series.nam... | Call sumSeries after inserting wildcards at the given position(s).
Example::
&target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value,
1)
This would be the equivalent of::
&target=sumSeries(host.*.cpu-user.value)&target=sumSeries(
... |
def averageSeriesWithWildcards(requestContext, seriesList, *positions):
matchedList = defaultdict(list)
for series in seriesList:
newname = '.'.join(map(lambda x: x[1],
filter(lambda i: i[0] not in positions,
enumerate(series.name... | Call averageSeries after inserting wildcards at the given position(s).
Example::
&target=averageSeriesWithWildcards(
host.cpu-[0-7].cpu-{user,system}.value, 1)
This would be the equivalent of::
&target=averageSeries(host.*.cpu-user.value)&target=averageSeries(
host.*.... |
def multiplySeriesWithWildcards(requestContext, seriesList, *position):
positions = [position] if isinstance(position, int) else position
newSeries = {}
newNames = []
for series in seriesList:
new_name = ".".join(map(lambda x: x[1],
filter(lambda i: i[0] not ... | Call multiplySeries after inserting wildcards at the given position(s).
Example::
&target=multiplySeriesWithWildcards(
web.host-[0-7].{avg-response,total-request}.value, 2)
This would be the equivalent of::
&target=multiplySeries(web.host-0.{avg-response,total-request}.value)
... |
def rangeOfSeries(requestContext, *seriesLists):
if not seriesLists or not any(seriesLists):
return []
seriesList, start, end, step = normalize(seriesLists)
name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList)
values = (safeSubtract(max(row),
min(row)) f... | Takes a wildcard seriesList.
Distills down a set of inputs into the range of the series
Example::
&target=rangeOfSeries(Server*.connections.total) |
def percentileOfSeries(requestContext, seriesList, n, interpolate=False):
if n <= 0:
raise ValueError(
'The requested percent is required to be greater than 0')
if not seriesList:
return []
name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n)
start, end... | percentileOfSeries returns a single series which is composed of the
n-percentile values taken across a wildcard series at each point.
Unless `interpolate` is set to True, percentile values are actual values
contained in one of the supplied series. |
def keepLastValue(requestContext, seriesList, limit=INF):
for series in seriesList:
series.name = "keepLastValue(%s)" % (series.name)
series.pathExpression = series.name
consecutiveNones = 0
for i, value in enumerate(series):
series[i] = value
# No 'keepi... | Takes one metric or a wildcard seriesList, and optionally a limit to the
number of 'None' values to skip over. Continues the line with the last
received value when gaps ('None' values) appear in your data, rather than
breaking your line.
Example::
&target=keepLastValue(Server01.connections.han... |
def interpolate(requestContext, seriesList, limit=INF):
for series in seriesList:
series.name = "interpolate(%s)" % (series.name)
series.pathExpression = series.name
consecutiveNones = 0
for i, value in enumerate(series):
series[i] = value
# No 'keeping' ... | Takes one metric or a wildcard seriesList, and optionally a limit to the
number of 'None' values to skip over. Continues the line with the last
received value when gaps ('None' values) appear in your data, rather than
breaking your line.
Example::
&target=interpolate(Server01.connections.handl... |
def changed(requestContext, seriesList):
for series in seriesList:
series.name = series.pathExpression = 'changed(%s)' % series.name
previous = None
for index, value in enumerate(series):
if previous is None:
series[index] = 0
elif value is not No... | Takes one metric or a wildcard seriesList.
Output 1 when the value changed, 0 when null or the same
Example::
&target=changed(Server01.connections.handled) |
def divideSeriesLists(requestContext, dividendSeriesList, divisorSeriesList):
if len(dividendSeriesList) != len(divisorSeriesList):
raise ValueError("dividendSeriesList and divisorSeriesList argument\
must have equal length")
results = []
for dividendSeries, divisorSeri... | Iterates over a two lists and divides list1[0] by list2[0], list1[1] by
list2[1] and so on. The lists need to be the same length |
def divideSeries(requestContext, dividendSeriesList, divisorSeriesList):
if len(divisorSeriesList) == 0:
for series in dividendSeriesList:
series.name = "divideSeries(%s,MISSING)" % series.name
series.pathExpression = series.name
for i in range(len(series)):
... | Takes a dividend metric and a divisor metric and draws the division result.
A constant may *not* be passed. To divide by a constant, use the scale()
function (which is essentially a multiplication operation) and use the
inverse of the dividend. (Division by 8 = multiplication by 1/8 or 0.125)
Example::... |
def multiplySeries(requestContext, *seriesLists):
if not seriesLists or not any(seriesLists):
return []
seriesList, start, end, step = normalize(seriesLists)
if len(seriesList) == 1:
return seriesList
name = "multiplySeries(%s)" % ','.join([s.name for s in seriesList])
product =... | Takes two or more series and multiplies their points. A constant may not be
used. To multiply by a constant, use the scale() function.
Example::
&target=multiplySeries(Series.dividends,Series.divisors) |
def weightedAverage(requestContext, seriesListAvg, seriesListWeight, *nodes):
if isinstance(nodes, int):
nodes = [nodes]
sortedSeries = {}
for seriesAvg, seriesWeight in zip_longest(
seriesListAvg, seriesListWeight):
key = ''
for node in nodes:
key += ser... | Takes a series of average values and a series of weights and
produces a weighted average for all values.
The corresponding values should share one or more zero-indexed nodes.
Example::
&target=weightedAverage(*.transactions.mean,*.transactions.count,0)
&target=weightedAverage(*.transactio... |
def exponentialMovingAverage(requestContext, seriesList, windowSize):
# EMA = C * (current_value) + (1 - C) + EMA
# C = 2 / (windowSize + 1)
# The following was copied from movingAverage, and altered for ema
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize... | Takes a series of values and a window size and produces an exponential
moving average utilizing the following formula:
ema(current) = constant * (Current Value) + (1 - constant) * ema(previous)
The Constant is calculated as:
constant = 2 / (windowSize + 1)
The first period EMA uses a simple ... |
def movingMedian(requestContext, seriesList, windowSize):
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, six.string_types):
delta = parseTimeOffset(windowSize)
windowInterval = to_seconds(delta)
if windowInterval:
previewSeconds = window... | Graphs the moving median of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of
datapoints or a quoted string with a length of time like '1hour' or '5min'
(See ``from / until`` in the render\_api_ for examples... |
def scale(requestContext, seriesList, factor):
    """Takes one metric or a wildcard seriesList followed by a constant, and
    multiplies the datapoint by the constant provided at each point.

    Example::

        &target=scale(Server.instance01.threads.busy,10)
        &target=scale(Server.instance*.threads.busy,10)
    """
    for series in seriesList:
        renamed = "scale(%s,%g)" % (series.name, float(factor))
        series.name = renamed
        series.pathExpression = renamed
        for index, datapoint in enumerate(series):
            series[index] = safeMul(datapoint, factor)
    return seriesList
def scaleToSeconds(requestContext, seriesList, seconds):
for series in seriesList:
series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds)
series.pathExpression = series.name
factor = seconds * 1.0 / series.step
for i, value in enumerate(series):
series[i] = s... | Takes one metric or a wildcard seriesList and returns "value per seconds"
where seconds is a last argument to this functions.
Useful in conjunction with derivative or integral function if you want
to normalize its result to a known resolution for arbitrary retentions |
def pow(requestContext, seriesList, factor):
    """Takes one metric or a wildcard seriesList followed by a constant, and
    raises the datapoint by the power of the constant provided at each point.

    Example::

        &target=pow(Server.instance01.threads.busy,10)
        &target=pow(Server.instance*.threads.busy,10)
    """
    for series in seriesList:
        renamed = "pow(%s,%g)" % (series.name, float(factor))
        series.name = renamed
        series.pathExpression = renamed
        for index, datapoint in enumerate(series):
            series[index] = safePow(datapoint, factor)
    return seriesList
def powSeries(requestContext, *seriesLists):
if not seriesLists or not any(seriesLists):
return []
seriesList, start, end, step = normalize(seriesLists)
name = "powSeries(%s)" % ','.join([s.name for s in seriesList])
values = []
for row in zip_longest(*seriesList):
first = True
... | Takes two or more series and pows their points. A constant line may be
used.
Example::
&target=powSeries(Server.instance01.app.requests,
Server.instance01.app.replies) |
def squareRoot(requestContext, seriesList):
    """Takes one metric or a wildcard seriesList, and computes the square
    root of each datapoint.

    Example::

        &target=squareRoot(Server.instance01.threads.busy)
    """
    for series in seriesList:
        series.name = "squareRoot(%s)" % (series.name)
        # Square root expressed as the 0.5 power, via the safe helper.
        for index, datapoint in enumerate(series):
            series[index] = safePow(datapoint, 0.5)
    return seriesList
def absolute(requestContext, seriesList):
    """Takes one metric or a wildcard seriesList and applies the mathematical
    abs function to each datapoint transforming it to its absolute value.

    Example::

        &target=absolute(Server.instance01.threads.busy)
        &target=absolute(Server.instance*.threads.busy)
    """
    for series in seriesList:
        renamed = "absolute(%s)" % (series.name)
        series.name = renamed
        series.pathExpression = renamed
        for index, datapoint in enumerate(series):
            series[index] = safeAbs(datapoint)
    return seriesList
def offset(requestContext, seriesList, factor):
for series in seriesList:
series.name = "offset(%s,%g)" % (series.name, float(factor))
series.pathExpression = series.name
for i, value in enumerate(series):
if value is not None:
series[i] = value + factor
... | Takes one metric or a wildcard seriesList followed by a constant, and adds
the constant to each datapoint.
Example::
&target=offset(Server.instance01.threads.busy,10) |
def offsetToZero(requestContext, seriesList):
    """Offsets a metric or wildcard seriesList by subtracting the minimum
    value in the series from each datapoint.

    Useful to compare different series where the values in each series may
    be higher or lower on average but you're only interested in the
    relative difference.
    """
    for series in seriesList:
        series.name = "offsetToZero(%s)" % (series.name)
        floor = safeMin(series)
        for index, datapoint in enumerate(series):
            if datapoint is None:
                continue
            series[index] = datapoint - floor
    return seriesList
def movingAverage(requestContext, seriesList, windowSize):
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, six.string_types):
delta = parseTimeOffset(windowSize)
windowInterval = to_seconds(delta)
if windowInterval:
previewSeconds = windo... | Graphs the moving average of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of
datapoints or a quoted string with a length of time like '1hour' or '5min'
(See ``from / until`` in the render\_api_ for example... |
def movingSum(requestContext, seriesList, windowSize):
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, six.string_types):
delta = parseTimeOffset(windowSize)
windowInterval = abs(delta.seconds + (delta.days * 86400))
if windowInterval:
pr... | Graphs the moving sum of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of
datapoints or a quoted string with a length of time like '1hour' or '5min'
(See ``from / until`` in the render\_api_ for examples of... |
def movingMax(requestContext, seriesList, windowSize):
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, six.string_types):
delta = parseTimeOffset(windowSize)
windowInterval = abs(delta.seconds + (delta.days * 86400))
if windowInterval:
pr... | Graphs the moving maximum of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of
datapoints or a quoted string with a length of time like '1hour' or '5min'
(See ``from / until`` in the render\_api_ for example... |
def consolidateBy(requestContext, seriesList, consolidationFunc):
for series in seriesList:
# datalib will throw an exception, so it's not necessary to validate
# here
series.consolidationFunc = consolidationFunc
series.name = 'consolidateBy(%s,"%s")' % (series.name,
... | Takes one metric or a wildcard seriesList and a consolidation function
name.
Valid function names are 'sum', 'average', 'min', and 'max'.
When a graph is drawn where width of the graph size in pixels is smaller
than the number of datapoints to be graphed, Graphite consolidates the
values to to pre... |
def delay(requestContext, seriesList, steps):
results = []
for series in seriesList:
newValues = []
prev = []
for val in series:
if len(prev) < steps:
newValues.append(None)
prev.append(val)
continue
newValues.a... | This shifts all samples later by an integer number of steps. This can be
used for custom derivative calculations, among other things. Note: this
will pad the early end of the data with None for every step shifted.
This complements other time-displacement functions such as timeShift and
timeSlice, in th... |
def integral(requestContext, seriesList):
results = []
for series in seriesList:
newValues = []
current = 0.0
for val in series:
if val is None:
newValues.append(None)
else:
current += val
newValues.append(curre... | This will show the sum over time, sort of like a continuous addition
function. Useful for finding totals or trends in metrics that are
collected per minute.
Example::
&target=integral(company.sales.perMinute)
This would start at zero on the left side of the graph, adding the sales
each mi... |
def integralByInterval(requestContext, seriesList, intervalUnit):
intervalDuration = int(to_seconds(parseTimeOffset(intervalUnit)))
startTime = int(epoch(requestContext['startTime']))
results = []
for series in seriesList:
newValues = []
# current time within series iteration
... | This will do the same as integral() funcion, except resetting the total
to 0 at the given time in the parameter "from"
Useful for finding totals per hour/day/week/..
Example::
&target=integralByInterval(company.sales.perMinute,
"1d")&from=midnight-10days
Thi... |
def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
results = []
for series in seriesList:
newValues = []
prev = None
for val in series:
if None in (prev, val):
newValues.append(None)
prev = val
continue
... | Same as the derivative function above, but ignores datapoints that trend
down. Useful for counters that increase for a long time, then wrap or
reset. (Such as if a network interface is destroyed and recreated by
unloading and re-loading a kernel module, common with USB / WiFi cards.
Example::
... |
def stacked(requestContext, seriesLists, stackName='__DEFAULT__'):
if 'totalStack' in requestContext:
totalStack = requestContext['totalStack'].get(stackName, [])
else:
requestContext['totalStack'] = {}
totalStack = []
results = []
for series in seriesLists:
newValue... | Takes one metric or a wildcard seriesList and change them so they are
stacked. This is a way of stacking just a couple of metrics without having
to use the stacked area mode (that stacks everything). By means of this a
mixed stacked and non stacked graph can be made
It can also take an optional argumen... |
def areaBetween(requestContext, *seriesLists):
if len(seriesLists) == 1:
[seriesLists] = seriesLists
assert len(seriesLists) == 2, ("areaBetween series argument must "
"reference *exactly* 2 series")
lower, upper = seriesLists
if len(lower) == 1:
[... | Draws the vertical area in between the two series in seriesList. Useful for
visualizing a range such as the minimum and maximum latency for a service.
areaBetween expects **exactly one argument** that results in exactly two
series (see example below). The order of the lower and higher values
series doe... |
def aliasSub(requestContext, seriesList, search, replace):
    r"""Runs series names through a regex search/replace.

    Example::

        &target=aliasSub(ip.*TCP*,"^.*TCP(\d+)","\1")
    """
    def substitute(name):
        return re.sub(search, replace, name)

    try:
        # A single TimeSeries (has a .name attribute) is renamed directly.
        seriesList.name = substitute(seriesList.name)
    except AttributeError:
        # Otherwise it is a plain list of series; rename each one.
        for series in seriesList:
            series.name = substitute(series.name)
    return seriesList
def alias(requestContext, seriesList, newName):
    """Takes one metric or a wildcard seriesList and a string in quotes.
    Prints the string instead of the metric name in the legend.

    Example::

        &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
    """
    try:
        # A single TimeSeries accepts the attribute assignment directly.
        setattr(seriesList, 'name', newName)
    except AttributeError:
        # A plain list raises AttributeError; rename every member instead.
        for series in seriesList:
            series.name = newName
    return seriesList
def cactiStyle(requestContext, seriesList, system=None, units=None):
def fmt(x):
if system:
if units:
return "%.2f %s" % format_units(x, system=system, units=units)
else:
return "%.2f%s" % format_units(x, system=system)
else:
i... | Takes a series list and modifies the aliases to provide column aligned
output with Current, Max, and Min values in the style of cacti. Optionally
takes a "system" value to apply unit formatting in the same style as the
Y-axis, or a "unit" string to append an arbitrary unit suffix.
NOTE: column alignment... |
def _getFirstPathExpression(name):
tokens = grammar.parseString(name)
pathExpression = None
while pathExpression is None:
if tokens.pathExpression:
pathExpression = tokens.pathExpression
elif tokens.expression:
tokens = tokens.expression
elif tokens.call:... | Returns the first metric path in an expression. |
def aliasByNode(requestContext, seriesList, *nodes):
    """Takes a seriesList and applies an alias derived from one or more
    "node" portion/s of the target name. Node indices are 0 indexed.

    Example::

        &target=aliasByNode(ganglia.*.cpu.load5,1)
    """
    for series in seriesList:
        # Alias from the first metric path found in the series expression.
        parts = _getFirstPathExpression(series.name).split('.')
        series.name = '.'.join([parts[node] for node in nodes])
    return seriesList
def legendValue(requestContext, seriesList, *valueTypes):
valueFuncs = {
'avg': lambda s: safeDiv(safeSum(s), safeLen(s)),
'total': safeSum,
'min': safeMin,
'max': safeMax,
'last': safeLast,
}
system = None
if valueTypes[-1] in ('si', 'binary'):
syste... | Takes one metric or a wildcard seriesList and a string in quotes.
Appends a value to the metric name in the legend. Currently one or several
of: `last`, `avg`, `total`, `min`, `max`. The last argument can be `si`
(default) or `binary`, in that case values will be formatted in the
corresponding system.
... |
def alpha(requestContext, seriesList, alpha):
    """Assigns the given alpha transparency setting to the series.

    Takes a float value between 0 and 1.
    """
    for series in seriesList:
        # series.options is a plain dict of rendering options.
        series.options.update(alpha=alpha)
    return seriesList
def color(requestContext, seriesList, theColor):
    """Assigns the given color to the seriesList.

    Example::

        &target=color(collectd.hostname.cpu.0.user, 'green')
        &target=color(collectd.hostname.cpu.0.system, 'ff0000')
        &target=color(collectd.hostname.cpu.0.idle, 'gray')
        &target=color(collectd.hostname.cpu.0.idle, '6464ffaa')
    """
    for series in seriesList:
        setattr(series, 'color', theColor)
    return seriesList
def substr(requestContext, seriesList, start=0, stop=0):
for series in seriesList:
left = series.name.rfind('(') + 1
right = series.name.find(')')
if right < 0:
right = len(series.name)+1
cleanName = series.name[left:right:].split('.')
if int(stop) == 0:
... | Takes one metric or a wildcard seriesList followed by 1 or 2 integers.
Assume that the metric name is a list or array, with each element
separated by dots. Prints n - length elements of the array (if only one
integer n is passed) or n - m elements of the array (if two integers n and
m are passed). The l... |
def logarithm(requestContext, seriesList, base=10):
results = []
for series in seriesList:
newValues = []
for val in series:
if val is None:
newValues.append(None)
elif val <= 0:
newValues.append(None)
else:
... | Takes one metric or a wildcard seriesList, a base, and draws the y-axis in
logarithmic format. If base is omitted, the function defaults to base 10.
Example::
&target=log(carbon.agents.hostname.avgUpdateTime,2) |
def maximumBelow(requestContext, seriesList, n):
    """Takes one metric or a wildcard seriesList followed by a constant n.
    Draws only the metrics whose maximum value is no greater than n
    (series with no valid datapoints are also kept).

    Example::

        &target=maximumBelow(system.interface.eth*.packetsSent,1000)

    This would only display interfaces which always sent less than 1000
    packets/min.
    """
    def qualifies(series):
        maximum = safeMax(series)
        return maximum is None or maximum <= n

    return [series for series in seriesList if qualifies(series)]
def minimumBelow(requestContext, seriesList, n):
    """Takes one metric or a wildcard seriesList followed by a constant n.
    Draws only the metrics whose minimum value is no greater than n
    (series with no valid datapoints are also kept).

    Example::

        &target=minimumBelow(system.interface.eth*.packetsSent,1000)

    This would only display interfaces which sent at one point less than
    1000 packets/min.
    """
    def qualifies(series):
        minimum = safeMin(series)
        return minimum is None or minimum <= n

    return [series for series in seriesList if qualifies(series)]
def highestMax(requestContext, seriesList, n=1):
    """Takes one metric or a wildcard seriesList followed by an integer N.

    Out of all metrics passed, draws only the N metrics with the highest
    maximum value in the time period specified.

    Example::

        &target=highestMax(server*.instance*.threads.busy,5)

    Draws the top 5 servers who have had the most busy threads.
    """
    # Bug fix: the second sort previously keyed on the builtin max(), which
    # raises TypeError on series containing None datapoints under Python 3
    # and was inconsistent with the first sort's use of safeMax(). Use one
    # None-tolerant key function for both sorts.
    def keyfunc(series):
        maximum = safeMax(series)
        # An all-None series has no maximum; rank it below every real value
        # instead of letting None keys crash the comparison.
        return -float('inf') if maximum is None else maximum

    result_list = sorted(seriesList, key=keyfunc)[-n:]
    return sorted(result_list, key=keyfunc, reverse=True)
def currentAbove(requestContext, seriesList, n):
    """Takes one metric or a wildcard seriesList followed by an integer N.
    Out of all metrics passed, draws only the metrics whose last value is
    at least N at the end of the time period specified.

    Example::

        &target=currentAbove(server*.instance*.threads.busy,50)

    Draws the servers with more than 50 busy threads.
    """
    def qualifies(series):
        last = safeLast(series)
        return last is not None and last >= n

    return [series for series in seriesList if qualifies(series)]
def averageAbove(requestContext, seriesList, n):
    """Takes one metric or a wildcard seriesList followed by an integer N.
    Out of all metrics passed, draws only the metrics with an average value
    of at least N for the time period specified.

    Example::

        &target=averageAbove(server*.instance*.threads.busy,25)

    Draws the servers with average values above 25.
    """
    def qualifies(series):
        average = safeAvg(series)
        return average is not None and average >= n

    return [series for series in seriesList if qualifies(series)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.