func_code_string stringlengths 52 1.94M | func_documentation_string stringlengths 1 47.2k |
|---|---|
def _getPercentile(points, n, interpolate=False):
sortedPoints = sorted(not_none(points))
if len(sortedPoints) == 0:
return None
fractionalRank = (n/100.0) * (len(sortedPoints) + 1)
rank = int(fractionalRank)
rankFraction = fractionalRank - rank
if not interpolate:
rank += i... | Percentile is calculated using the method outlined in the NIST Engineering
Statistics Handbook:
http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm |
def nPercentile(requestContext, seriesList, n):
assert n, 'The requested percent is required to be greater than 0'
results = []
for s in seriesList:
# Create a sorted copy of the TimeSeries excluding None values in the
# values list.
s_copy = TimeSeries(s.name, s.start, s.end, s... | Returns n-percent of each series in the seriesList. |
def averageOutsidePercentile(requestContext, seriesList, n):
averages = [safeAvg(s) for s in seriesList]
if n < 50:
n = 100 - n
lowPercentile = _getPercentile(averages, 100 - n)
highPercentile = _getPercentile(averages, n)
return [s for s in seriesList
if not lowPercentile <... | Removes functions lying inside an average percentile interval |
def removeBetweenPercentile(requestContext, seriesList, n):
if n < 50:
n = 100 - n
transposed = list(zip_longest(*seriesList))
lowPercentiles = [_getPercentile(col, 100-n) for col in transposed]
highPercentiles = [_getPercentile(col, n) for col in transposed]
return [l for l in seriesLi... | Removes lines who do not have an value lying in the x-percentile of all
the values at a moment |
def removeAboveValue(requestContext, seriesList, n):
for s in seriesList:
s.name = 'removeAboveValue(%s, %g)' % (s.name, n)
s.pathExpression = s.name
for (index, val) in enumerate(s):
if val is None:
continue
if val > n:
s[index] =... | Removes data above the given threshold from the series or list of series
provided. Values above this threshold are assigned a value of None. |
def removeBelowPercentile(requestContext, seriesList, n):
for s in seriesList:
s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n)
s.pathExpression = s.name
try:
percentile = nPercentile(requestContext, [s], n)[0][0]
except IndexError:
continue
... | Removes data below the nth percentile from the series or list of series
provided. Values below this percentile are assigned a value of None. |
def sortByName(requestContext, seriesList, natural=False):
if natural:
return list(sorted(seriesList, key=lambda x: paddedName(x.name)))
else:
return list(sorted(seriesList, key=lambda x: x.name)) | Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the metric name using either alphabetical
order or natural sorting. Natural sorting allows names containing numbers
to be sorted more naturally, e.g:
- Alphabetical sorting: server1, server11, server12, server2
- Natural sorti... |
def sortByTotal(requestContext, seriesList):
return list(sorted(seriesList, key=safeSum, reverse=True)) | Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified. |
def useSeriesAbove(requestContext, seriesList, value, search, replace):
newSeries = []
for series in seriesList:
newname = re.sub(search, replace, series.name)
if safeMax(series) > value:
n = evaluateTarget(requestContext, newname)
if n is not None and len(n) > 0:
... | Compares the maximum of each series against the given `value`. If the
series maximum is greater than `value`, the regular expression search and
replace is applied against the series name to plot a related metric.
e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'),
the response time metric... |
def mostDeviant(requestContext, seriesList, n):
deviants = []
for series in seriesList:
mean = safeAvg(series)
if mean is None:
continue
square_sum = sum([(value - mean) ** 2 for value in series
if value is not None])
sigma = safeDiv(squ... | Takes one metric or a wildcard seriesList followed by an integer N.
Draws the N most deviant metrics.
To find the deviants, the standard deviation (sigma) of each series
is taken and ranked. The top N standard deviations are returned.
Example::
&target=mostDeviant(server*.instance*.memory.free... |
def stdev(requestContext, seriesList, points, windowTolerance=0.1):
# For this we take the standard deviation in terms of the moving average
# and the moving average of series squares.
for seriesIndex, series in enumerate(seriesList):
stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(... | Takes one metric or a wildcard seriesList followed by an integer N.
Draw the Standard Deviation of all metrics passed for the past N
datapoints. If the ratio of null points in the window is greater than
windowTolerance, skip the calculation. The default for windowTolerance is
0.1 (up to 10% of points in... |
def secondYAxis(requestContext, seriesList):
for series in seriesList:
series.options['secondYAxis'] = True
series.name = 'secondYAxis(%s)' % series.name
return seriesList | Graph the series on the secondary Y axis. |
def holtWintersForecast(requestContext, seriesList):
previewSeconds = 7 * 86400 # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
timedelta(seconds=previewSe... | Performs a Holt-Winters forecast using the series as input data. Data from
one week previous to the series is used to bootstrap the initial forecast. |
def holtWintersConfidenceBands(requestContext, seriesList, delta=3):
previewSeconds = 7 * 86400 # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = (requestContext['startTime'] -
timedelta(s... | Performs a Holt-Winters forecast using the series as input data and plots
upper and lower bands with the predicted forecast deviations. |
def holtWintersAberration(requestContext, seriesList, delta=3):
results = []
for series in seriesList:
confidenceBands = holtWintersConfidenceBands(requestContext, [series],
delta)
lowerBand = confidenceBands[0]
upperBand = confid... | Performs a Holt-Winters forecast using the series as input data and plots
the positive or negative deviation of the series data from the forecast. |
def holtWintersConfidenceArea(requestContext, seriesList, delta=3):
bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
results = areaBetween(requestContext, bands)
for series in results:
series.name = series.name.replace('areaBetween',
... | Performs a Holt-Winters forecast using the series as input data and plots
the area between the upper and lower bands of the predicted forecast
deviations. |
def linearRegressionAnalysis(series):
n = safeLen(series)
sumI = sum([i for i, v in enumerate(series) if v is not None])
sumV = sum([v for i, v in enumerate(series) if v is not None])
sumII = sum([i * i for i, v in enumerate(series) if v is not None])
sumIV = sum([i * v for i, v in enumerate(se... | Returns factor and offset of linear regression function by least
squares method. |
def linearRegression(requestContext, seriesList, startSourceAt=None,
endSourceAt=None):
from .app import evaluateTarget
results = []
sourceContext = requestContext.copy()
if startSourceAt is not None:
sourceContext['startTime'] = parseATTime(startSourceAt)
if endSou... | Graphs the liner regression function by least squares method.
Takes one metric or a wildcard seriesList, followed by a quoted string
with the time to start the line and another quoted string with the time
to end the line. The start and end times are inclusive (default range is
from to until). See ``fro... |
def drawAsInfinite(requestContext, seriesList):
for series in seriesList:
series.options['drawAsInfinite'] = True
series.name = 'drawAsInfinite(%s)' % series.name
return seriesList | Takes one metric or a wildcard seriesList.
If the value is zero, draw the line at 0. If the value is above zero, draw
the line at infinity. If the value is null or less than zero, do not draw
the line.
Useful for displaying on/off metrics, such as exit codes. (0 = success,
anything else = failure.)... |
def lineWidth(requestContext, seriesList, width):
for series in seriesList:
series.options['lineWidth'] = width
return seriesList | Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a line width of F, overriding the default
value of 1, or the &lineWidth=X.X parameter.
Useful for highlighting a single metric out of many, or having multiple
line widths in one graph.
Example::
... |
def dashed(requestContext, seriesList, dashLength=5):
for series in seriesList:
series.name = 'dashed(%s, %g)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList | Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a dotted line with segments of length F
If omitted, the default length of the segments is 5.0
Example::
&target=dashed(server01.instance01.memory.free,2.5) |
def timeStack(requestContext, seriesList, timeShiftUnit, timeShiftStart,
timeShiftEnd):
# Default to negative. parseTimeOffset defaults to +
if timeShiftUnit[0].isdigit():
timeShiftUnit = '-' + timeShiftUnit
delta = parseTimeOffset(timeShiftUnit)
# if len(seriesList) > 1, they... | Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats). Also takes a start multiplier and end
multiplier for the length of time-
Create a seriesList which is composed the original metric seri... |
def timeShift(requestContext, seriesList, timeShift, resetEnd=True,
alignDST=False):
# Default to negative. parseTimeOffset defaults to +
if timeShift[0].isdigit():
timeShift = '-' + timeShift
delta = parseTimeOffset(timeShift)
myContext = requestContext.copy()
myContext['... | Takes one metric or a wildcard seriesList, followed by a quoted string
with the length of time (See ``from / until`` in the render\_api_ for
examples of time formats).
Draws the selected metrics shifted in time. If no sign is given, a minus
sign ( - ) is implied which will shift the metric back in time... |
def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt='now'):
results = []
start = epoch(parseATTime(startSliceAt))
end = epoch(parseATTime(endSliceAt))
for slicedSeries in seriesList:
slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name,
... | Takes one metric or a wildcard metric, followed by a quoted
string with the time to start the line and another quoted string
with the time to end the line. The start and end times are
inclusive. See ``from / until`` in the render api for examples of
time formats.
Useful for filtering out a part of ... |
def constantLine(requestContext, value):
name = "constantLine(%s)" % str(value)
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
step = int((end - start) / 2.0)
series = TimeSeries(str(value), start, end, step, [value, value, value])
series.pathExp... | Takes a float F.
Draws a horizontal line at value F across the graph.
Example::
&target=constantLine(123.456) |
def aggregateLine(requestContext, seriesList, func='avg'):
t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}
if func not in t_funcs:
raise ValueError("Invalid function %s" % func)
results = []
for series in seriesList:
value = t_funcs[func](series)
if value is not N... | Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual ... |
def verticalLine(requestContext, ts, label=None, color=None):
ts = int(epoch(parseATTime(ts, requestContext['tzinfo'])))
start = int(epoch(requestContext['startTime']))
end = int(epoch(requestContext['endTime']))
if ts < start:
raise ValueError("verticalLine(): timestamp %s exists "
... | Takes a timestamp string ts.
Draws a vertical line at the designated timestamp with optional
'label' and 'color'. Supported timestamp formats include both
relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings,
such as those used with ``from`` and ``until`` parameters. When
set, the 'label'... |
def threshold(requestContext, value, label=None, color=None):
[series] = constantLine(requestContext, value)
if label:
series.name = label
if color:
series.color = color
return [series] | Takes a float F, followed by a label (in double quotes) and a color.
(See ``bgcolor`` in the render\_api_ for valid color names & formats.)
Draws a horizontal line at value F across the graph.
Example::
&target=threshold(123.456, "omgwtfbbq", "red") |
def transformNull(requestContext, seriesList, default=0, referenceSeries=None):
def transform(v, d):
if v is None:
return d
else:
return v
if referenceSeries:
defaults = [default if any(v is not None for v in x) else None
for x in zip_long... | Takes a metric or wildcard seriesList and replaces null values with
the value specified by `default`. The value 0 used if not specified.
The optional referenceSeries, if specified, is a metric or wildcard
series list that governs which time intervals nulls should be replaced.
If specified, nulls are re... |
def isNonNull(requestContext, seriesList):
def transform(v):
if v is None:
return 0
else:
return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in seri... | Takes a metric or wild card seriesList and counts up how many
non-null values are specified. This is useful for understanding
which metrics have data at a given point in time (ie, to count
which servers are alive).
Example::
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList whe... |
def identity(requestContext, name, step=60):
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series] | Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age
Example::
&target=identity("The.time.series")
This wou... |
def countSeries(requestContext, *seriesLists):
if not seriesLists or not any(seriesLists):
series = constantLine(requestContext, 0).pop()
series.pathExpression = "countSeries()"
else:
seriesList, start, end, step = normalize(seriesLists)
name = "countSeries(%s)" % formatPath... | Draws a horizontal line representing the number of nodes found in the
seriesList.
Example::
&target=countSeries(carbon.agents.*.*) |
def group(requestContext, *seriesLists):
seriesGroup = []
for s in seriesLists:
seriesGroup.extend(s)
return seriesGroup | Takes an arbitrary number of seriesLists and adds them to a single
seriesList. This is used to pass multiple seriesLists to a function which
only takes one. |
def mapSeries(requestContext, seriesList, mapNode):
metaSeries = {}
keys = []
for series in seriesList:
key = series.name.split(".")[mapNode]
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(se... | Short form: ``map()``.
Takes a seriesList and maps it to a list of sub-seriesList. Each
sub-seriesList has the given mapNode in common.
Example (note: This function is not very useful alone. It should be used
with :py:func:`reduceSeries`)::
mapSeries(servers.*.cpu.*,1) =>
[
... |
def reduceSeries(requestContext, seriesLists, reduceFunction, reduceNode,
*reduceMatchers):
metaSeries = {}
keys = []
for seriesList in seriesLists:
for series in seriesList:
nodes = series.name.split('.')
node = nodes[reduceNode]
reduceSerie... | Short form: ``reduce()``.
Takes a list of seriesLists and reduces it to a list of series by means of
the reduceFunction.
Reduction is performed by matching the reduceNode in each series against
the list of reduceMatchers. The each series is then passed to the
reduceFunction as arguments in the ord... |
def applyByNode(requestContext, seriesList, nodeNum, templateFunction,
newName=None):
from .app import evaluateTarget
prefixes = set()
for series in seriesList:
prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
prefixes.add(prefix)
results = []
for prefix i... | Takes a seriesList and applies some complicated function (described by
a string), replacing templates with unique prefixes of keys from the
seriesList (the key is all nodes up to the index given as `nodeNum`).
If the `newName` parameter is provided, the name of the resulting series
will be given by tha... |
def groupByNode(requestContext, seriesList, nodeNum, callback):
return groupByNodes(requestContext, seriesList, callback, nodeNum) | Takes a serieslist and maps a callback to subgroups within as defined by a
common node.
Example::
&target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the second nod... |
def groupByNodes(requestContext, seriesList, callback, *nodes):
from .app import app
metaSeries = {}
keys = []
if isinstance(nodes, int):
nodes = [nodes]
for series in seriesList:
key = '.'.join(series.name.split(".")[n] for n in nodes)
if key not in metaSeries:
... | Takes a serieslist and maps a callback to subgroups within as defined by
multiple nodes.
Example::
&target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)
Would return multiple series which are each the result of applying the
"sumSeries" function to groups joined on the nodes' list ... |
def exclude(requestContext, seriesList, pattern):
regex = re.compile(pattern)
return [s for s in seriesList if not regex.search(s.name)] | Takes a metric or a wildcard seriesList, followed by a regular expression
in double quotes. Excludes metrics that match the regular expression.
Example::
&target=exclude(servers*.instance*.threads.busy,"server02") |
def smartSummarize(requestContext, seriesList, intervalString, func='sum'):
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
# Adjust the start time to fit an entire day for intervals >= 1 day
requestContext = requestContext.copy()
tzinfo = requestContext['t... | Smarter experimental version of summarize. |
def summarize(requestContext, seriesList, intervalString, func='sum',
alignToFrom=False):
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
for series in seriesList:
buckets = {}
timestamps = range(int(series.start), int(series.end) + 1,... | Summarize the data into interval buckets of a certain size.
By default, the contents of each interval bucket are summed together.
This is useful for counters where each increment represents a discrete
event and retrieving a "per X" value requires summing all the events in
that interval.
Specifying... |
def hitcount(requestContext, seriesList, intervalString,
alignToInterval=False):
results = []
delta = parseTimeOffset(intervalString)
interval = to_seconds(delta)
if alignToInterval:
requestContext = requestContext.copy()
tzinfo = requestContext['tzinfo']
s = re... | Estimate hit counts from a list of time series.
This function assumes the values in each time series represent
hits per second. It calculates hits per some larger interval
such as per day or per hour. This function is like summarize(),
except that it compensates automatically for different time s... |
def timeFunction(requestContext, name, step=60):
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(epoch(whe... | Short Alias: time()
Just returns the timestamp for each X value. T
Example::
&target=time("The.time.series")
This would create a series named "The.time.series" that contains in Y
the same value (in seconds) as X.
A second argument can be provided as a step parameter (default is 60 secs) |
def sinFunction(requestContext, name, amplitude=1, step=60):
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(math.sin(epoch(when))*amplitude)
when += delta
series = TimeSeries(
name, int... | Short Alias: sin()
Just returns the sine of the current time. The optional amplitude parameter
changes the amplitude of the wave.
Example::
&target=sin("The.time.series", 2)
This would create a series named "The.time.series" that contains sin(x)*2.
A third argument can be provided as a ... |
def randomWalkFunction(requestContext, name, step=60):
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
values.append(current)
current += random.random() - 0.5
when += delta
return [Tim... | Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x... |
def validate(self, model, checks=[]):
custom = [
check_partial(reaction_id_check,
frozenset(r.id for r in model.reactions))
]
super(Medium, self).validate(model=model, checks=checks + custom) | Use a defined schema to validate the medium table format. |
def apply(self, model):
model.medium = {row.exchange: row.uptake
for row in self.data.itertuples(index=False)} | Set the defined medium on the given model. |
def add_environment_information(meta):
meta["timestamp"] = datetime.utcnow().isoformat(" ")
meta["platform"] = platform.system()
meta["release"] = platform.release()
meta["python"] = platform.python_version()
meta["packages"] = get_pkg_info("memote") | Record environment information. |
def find_transported_elements(rxn):
element_dist = defaultdict()
# Collecting elements for each metabolite.
for met in rxn.metabolites:
if met.compartment not in element_dist:
# Multiplication by the metabolite stoichiometry.
element_dist[met.compartment] = \
... | Return a dictionary showing the amount of transported elements of a rxn.
Collects the elements for each metabolite participating in a reaction,
multiplies the amount by the metabolite's stoichiometry in the reaction and
bins the result according to the compartment that metabolite is in. This
produces a... |
def find_transport_reactions(model):
transport_reactions = []
transport_rxn_candidates = set(model.reactions) - set(model.boundary) \
- set(find_biomass_reaction(model))
transport_rxn_candidates = set(
[rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2]
)
# A... | Return a list of all transport reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
A transport reaction is defined as follows:
1. It contains metabolites from at least 2 compartments and
2. at least 1 metabolite undergoes no... |
def is_transport_reaction_formulae(rxn):
# Collecting criteria to classify transporters by.
rxn_reactants = set([met.formula for met in rxn.reactants])
rxn_products = set([met.formula for met in rxn.products])
# Looking for formulas that stay the same on both side of the reaction.
transported_m... | Return boolean if a reaction is a transport reaction (from formulae).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation. |
def is_transport_reaction_annotations(rxn):
reactants = set([(k, tuple(v)) for met in rxn.reactants
for k, v in iteritems(met.annotation)
if met.id != "H"
and k is not None and k != 'sbo' and v is not None])
products = set([(k, tuple(v)) for me... | Return boolean if a reaction is a transport reaction (from annotations).
Parameters
----------
rxn: cobra.Reaction
The metabolic reaction under investigation. |
def find_converting_reactions(model, pair):
first = set(find_met_in_model(model, pair[0]))
second = set(find_met_in_model(model, pair[1]))
hits = list()
for rxn in model.reactions:
# FIXME: Use `set.issubset` much more idiomatic.
if len(first & set(rxn.reactants)) > 0 and len(
... | Find all reactions which convert a given metabolite pair.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
pair: tuple or list
A pair of metabolite identifiers without compartment suffix.
Returns
-------
frozenset
The set of reactio... |
def find_biomass_reaction(model):
sbo_matches = set([rxn for rxn in model.reactions if
rxn.annotation is not None and
'sbo' in rxn.annotation and
rxn.annotation['sbo'] == 'SBO:0000629'])
if len(sbo_matches) > 0:
return list(sbo_ma... | Return a list of the biomass reaction(s) of the model.
This function identifies possible biomass reactions using two steps:
1. Return reactions that include the SBO annotation "SBO:0000629" for
biomass.
If no reactions can be identifies this way:
2. Look for the ``buzzwords`` "biomass", "growth" an... |
def find_demand_reactions(model):
u
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'demand', extracellular) | u"""
Return a list of demand reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines demand reactions as:
-- 'unbalanced network reactions that allow the accumulation of a compound'
-- reactions that are chiefly ... |
def find_sink_reactions(model):
u
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'sink', extracellular) | u"""
Return a list of sink reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines sink reactions as:
-- 'similar to demand reactions' but reversible, thus able to supply the
model with metabolites
-- reactio... |
def find_exchange_rxns(model):
u
try:
extracellular = find_compartment_id_in_model(model, 'e')
except KeyError:
extracellular = None
return find_boundary_types(model, 'exchange', extracellular) | u"""
Return a list of exchange reactions.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
[1] defines exchange reactions as:
-- reactions that 'define the extracellular environment'
-- 'unbalanced, extra-organism reactions that... |
def find_interchange_biomass_reactions(model, biomass=None):
boundary = set(model.boundary)
transporters = find_transport_reactions(model)
if biomass is None:
biomass = set(find_biomass_reaction(model))
return boundary | transporters | biomass | Return the set of all transport, boundary, and biomass reactions.
These reactions are either pseudo-reactions, or incorporated to allow
metabolites to pass between compartments. Some tests focus on purely
metabolic reactions and hence exclude this set.
Parameters
----------
model : cobra.Model... |
def run_fba(model, rxn_id, direction="max", single_value=True):
model.objective = model.reactions.get_by_id(rxn_id)
model.objective_direction = direction
if single_value:
try:
return model.slim_optimize()
except Infeasible:
return np.nan
else:
try:
... | Return the solution of an FBA to a set objective function.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
rxn_id : string
A string containing the reaction ID of the desired FBA objective.
direction: string
A string containing either "max" ... |
def close_boundaries_sensibly(model):
for rxn in model.reactions:
if rxn.reversibility:
rxn.bounds = -1, 1
else:
rxn.bounds = 0, 1
for boundary in model.boundary:
boundary.bounds = (0, 0) | Return a cobra model with all boundaries closed and changed constraints.
In the returned model previously fixed reactions are no longer constrained
as such. Instead reactions are constrained according to their
reversibility. This is to prevent the FBA from becoming infeasible when
trying to solve a mod... |
def metabolites_per_compartment(model, compartment_id):
return [met for met in model.metabolites
if met.compartment == compartment_id] | Identify all metabolites that belong to a given compartment.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Model specific compartment identifier.
Returns
-------
list
List of metabolites belonging to a giv... |
def largest_compartment_id_met(model):
# Sort compartments by decreasing size and extract the largest two.
candidate, second = sorted(
((c, len(metabolites_per_compartment(model, c)))
for c in model.compartments), reverse=True, key=itemgetter(1))[:2]
# Compare the size of the compartme... | Return the ID of the compartment with the most metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
string
Compartment ID of the compartment with the most metabolites. |
def find_compartment_id_in_model(model, compartment_id):
if compartment_id not in COMPARTMENT_SHORTLIST.keys():
raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure "
"you typed the ID correctly, if yes, update the "
"shortlist manually.".forma... | Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
compartment_id : string
Memote internal compartment identifier used to access compartment name
shortlist to look up poten... |
def find_met_in_model(model, mnx_id, compartment_id=None):
def compare_annotation(annotation):
query_values = set(utils.flatten(annotation.values()))
ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id]))
return query_values & ref_values
# Make sure that the MNX ID we'... | Return specific metabolites by looking up IDs in METANETX_SHORTLIST.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
mnx_id : string
Memote internal MetaNetX metabolite identifier used to map between
cross-references in the METANETX_SHORTLIST.
... |
def find_bounds(model):
lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions],
dtype=float)
upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions],
dtype=float)
lower_bound = np.nanmedian(lower_bounds[lower_bou... | Return the median upper and lower bound of the metabolic model.
Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but
this may not be the case for merged or autogenerated models. In these
cases, this function is used to iterate over all the bounds of all the
reactions and find the ... |
def render_html(self):
return self._template.safe_substitute(
report_type=self._report_type,
results=self.render_json()
) | Render an HTML report. |
def compute_score(self):
# LOGGER.info("Begin scoring")
cases = self.get_configured_tests() | set(self.result.cases)
scores = DataFrame({"score": 0.0, "max": 1.0},
index=sorted(cases))
self.result.setdefault("score", dict())
self.result["score"... | Calculate the overall test score using the configuration. |
def find_components_without_sbo_terms(model, components):
return [elem for elem in getattr(model, components) if
elem.annotation is None or 'sbo' not in elem.annotation] | Find model components that are not annotated with any SBO terms.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
Th... |
def check_component_for_specific_sbo_term(items, term):
r
# check for multiple allowable SBO terms
if isinstance(term, list):
return [elem for elem in items if
elem.annotation is None or
'sbo' not in elem.annotation or
not any(i in elem.annotation['sbo... | r"""
Identify model components that lack a specific SBO term(s).
Parameters
----------
items : list
A list of model components i.e. reactions to be checked for a specific
SBO term.
term : str or list of str
A string denoting a valid SBO term matching the regex '^SBO:\d{7}$'
... |
def get_smallest_compound_id(compounds_identifiers):
    """Return the smallest KEGG compound identifier from a list.

    KEGG identifiers prefixed with "C" denote compounds (as opposed to
    drugs "D" or glycans "G"). Among the compound identifiers the one
    with the lowest numeric part is returned.

    Parameters
    ----------
    compounds_identifiers : list of str
        KEGG identifiers, e.g. ``["C00031", "D00001"]``.

    Raises
    ------
    ValueError
        If the list contains no "C"-prefixed identifier.

    """
    compound_ids = (ident for ident in compounds_identifiers
                    if ident.startswith("C"))
    return min(compound_ids, key=lambda ident: int(ident[1:]))
KEGG identifiers may map to compounds, drugs or glycans prefixed
respectively with "C", "D", and "G" followed by at least 5 digits. We
choose the lowest KEGG identifier with the assumption that several
identifiers are due to chirality and that t... |
def map_metabolite2kegg(metabolite):
logger.debug("Looking for KEGG compound identifier for %s.", metabolite.id)
kegg_annotation = metabolite.annotation.get("kegg.compound")
if kegg_annotation is None:
# TODO (Moritz Beber): Currently name matching is very slow and
# inaccurate. We dis... | Return a KEGG compound identifier for the metabolite if it exists.
First see if there is an unambiguous mapping to a single KEGG compound ID
provided with the model. If not, check if there is any KEGG compound ID in
a list of mappings. KEGG IDs may map to compounds, drugs and glycans. KEGG
compound IDs... |
def translate_reaction(reaction, metabolite_mapping):
# Transport reactions where the same metabolite occurs in different
# compartments should have been filtered out but just to be sure, we add
# coefficients in the mapping.
stoichiometry = defaultdict(float)
for met, coef in iteritems(reactio... | Return a mapping from KEGG compound identifiers to coefficients.
Parameters
----------
reaction : cobra.Reaction
The reaction whose metabolites are to be translated.
metabolite_mapping : dict
An existing mapping from cobra.Metabolite to KEGG compound identifier
that may already ... |
def find_thermodynamic_reversibility_index(reactions):
u
incomplete_mapping = []
problematic_calculation = []
reversibility_indexes = []
unbalanced = []
metabolite_mapping = {}
for rxn in reactions:
stoich = translate_reaction(rxn, metabolite_mapping)
if len(stoich) < len(rxn... | u"""
Return the reversibility index of the given reactions.
To determine the reversibility index, we calculate
the reversibility index ln_gamma (see [1]_ section 3.5) of each reaction
using the eQuilibrator API [2]_.
Parameters
----------
reactions: list of cobra.Reaction
A... |
def check_stoichiometric_consistency(model):
problem = model.problem
# The transpose of the stoichiometric matrix N.T in the paper.
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
LOGG... | Verify the consistency of the model's stoichiometry.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.1 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Dete... |
def find_unconserved_metabolites(model):
problem = model.problem
stoich_trans = problem.Model()
internal_rxns = con_helpers.get_internals(model)
metabolites = set(met for rxn in internal_rxns for met in rxn.metabolites)
# The binary variables k[i] in the paper.
k_vars = list()
for met i... | Detect unconserved metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
See [1]_ section 3.2 for a complete description of the algorithm.
.. [1] Gevorgyan, A., M. G Poolman, and D. A Fell.
"Detection of Stoichiomet... |
def find_inconsistent_min_stoichiometry(model, atol=1e-13):
if check_stoichiometric_consistency(model):
return set()
Model, Constraint, Variable, Objective = con_helpers.get_interface(model)
unconserved_mets = find_unconserved_metabolites(model)
LOGGER.info("model has %d unconserved metabol... | Detect inconsistent minimal net stoichiometries.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
atol : float, optional
Values below the absolute tolerance are treated as zero. Expected to be
very small but larger than zero.
Notes
----... |
def detect_energy_generating_cycles(model, metabolite_id):
u
main_comp = helpers.find_compartment_id_in_model(model, 'c')
met = helpers.find_met_in_model(model, metabolite_id, main_comp)[0]
dissipation_rxn = Reaction('Dissipation')
if metabolite_id in ['MNXM3', 'MNXM63', 'MNXM51', 'MNXM121', 'MNXM42... | u"""
Detect erroneous energy-generating cycles for a a single metabolite.
The function will first build a dissipation reaction corresponding to the
input metabolite. This reaction is then set as the objective for
optimization, after closing all exchanges. If the reaction was able to
carry flux, an ... |
def find_stoichiometrically_balanced_cycles(model):
u
helpers.close_boundaries_sensibly(model)
fva_result = flux_variability_analysis(model, loopless=False)
return fva_result.index[
(fva_result["minimum"] <= (-1 + TOLERANCE_THRESHOLD)) |
(fva_result["maximum"] >= (1 - TOLERANCE_THRESHOLD... | u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabol... |
def find_orphans(model):
    """Return metabolites that are only consumed in reactions.

    Metabolites that are involved in an exchange reaction are never
    considered to be orphaned.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    exchanges = frozenset(model.exchanges)
    orphans = []
    for met in model.metabolites:
        reactions = met.reactions
        # A metabolite with no reactions at all is not an orphan.
        if not reactions:
            continue
        only_consumed = all(
            not rxn.reversibility and rxn not in exchanges and
            rxn.metabolites[met] < 0
            for rxn in reactions
        )
        if only_consumed:
            orphans.append(met)
    return orphans
Metabolites that are involved in an exchange reaction are never
considered to be orphaned.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. |
def find_metabolites_not_produced_with_open_bounds(model):
mets_not_produced = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
solution = help... | Return metabolites that cannot be produced with open exchange reactions.
A perfect model should be able to produce each and every metabolite when
all medium components are available.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------... |
def find_metabolites_not_consumed_with_open_bounds(model):
mets_not_consumed = list()
helpers.open_exchanges(model)
for met in model.metabolites:
with model:
exch = model.add_boundary(
met, type="irrex", reaction_id="IRREX", lb=-1000, ub=0)
solution = hel... | Return metabolites that cannot be consumed with open boundary reactions.
When all metabolites can be secreted, it should be possible for each and
every metabolite to be consumed in some form.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
... |
def find_reactions_with_unbounded_flux_default_condition(model):
try:
fva_result = flux_variability_analysis(model, fraction_of_optimum=1.0)
except Infeasible as err:
LOGGER.error("Failed to find reactions with unbounded flux "
"because '{}'. This may be a bug.".format(... | Return list of reactions whose flux is unbounded in the default condition.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
tuple
list
A list of reactions that in default modeling conditions are able to
c... |
def read_tabular(filename, dtype_conversion=None):
if dtype_conversion is None:
dtype_conversion = {}
name, ext = filename.split(".", 1)
ext = ext.lower()
# Completely empty columns are interpreted as float by default.
dtype_conversion["comment"] = str
if "csv" in ext:
df = ... | Read a tabular data file which can be CSV, TSV, XLS or XLSX.
Parameters
----------
filename : str or pathlib.Path
The full file path. May be a compressed file.
dtype_conversion : dict
Column names as keys and corresponding type for loading the data.
Please take a look at the `pa... |
def snapshot(model, filename, pytest_args, exclusive, skip, solver,
experimental, custom_tests, custom_config):
model_obj, sbml_ver, notifications = api.validate_model(
model)
if model_obj is None:
LOGGER.critical(
"The model could not be loaded due to the following... | Take a snapshot of a model's state and generate a report.
MODEL: Path to model file. Can also be supplied via the environment variable
MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'. |
def history(location, model, filename, deployment, custom_config):
callbacks.git_installed()
LOGGER.info("Initialising history report generation.")
if location is None:
raise click.BadParameter("No 'location' given or configured.")
try:
repo = git.Repo()
except git.InvalidGitRep... | Generate a report over a model's git commit history. |
def diff(models, filename, pytest_args, exclusive, skip, solver,
experimental, custom_tests, custom_config):
if not any(a.startswith("--tb") for a in pytest_args):
pytest_args = ["--tb", "no"] + pytest_args
# Add further directories to search for tests.
pytest_args.extend(custom_tests)... | Take a snapshot of all the supplied models and generate a diff report.
MODELS: List of paths to two or more model files. |
def build_branch_structure(self, model, skip):
self._history = dict()
self._history["commits"] = commits = dict()
self._history["branches"] = branches = dict()
for branch in self._repo.refs:
LOGGER.debug(branch.name)
if branch.name in skip:
... | Inspect and record the repo's branches and their history. |
def load_history(self, model, skip={"gh-pages"}):
if self._history is None:
self.build_branch_structure(model, skip)
self._results = dict()
all_commits = list(self._history["commits"])
for commit in all_commits:
try:
self._results[commit] ... | Load the entire results history into memory.
Could be a bad idea in a far future. |
def get_result(self, commit, default=None):
    """Return an individual result from the history if it exists.

    Parameters
    ----------
    commit : str
        The commit identifier whose result should be retrieved.
    default : memote.MemoteResult, optional
        The value returned when no result is recorded for ``commit``.
        When omitted, a fresh empty ``MemoteResult`` is returned.

    """
    # The previous signature used ``default=MemoteResult()``: a mutable
    # default argument is created once at definition time and shared
    # between all calls, so callers could accidentally mutate each
    # other's fallback result. Create a fresh instance per call instead.
    if default is None:
        default = MemoteResult()
    assert self._results is not None, \
        "Please call the method `load_history` first."
    return self._results.get(commit, default)
def absolute_extreme_coefficient_ratio(model):
    """Return the maximum and minimum absolute, non-zero coefficients.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    tuple of (float, float)
        The largest and the smallest non-zero absolute coefficient of
        the stoichiometric matrix.

    """
    stoichiometry, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    magnitudes = np.abs(stoichiometry)
    nonzero = magnitudes[magnitudes > 0]
    return magnitudes.max(), nonzero.min()
Parameters
----------
model : cobra.Model
The metabolic model under investigation. |
def number_independent_conservation_relations(model):
    """Return the number of conserved metabolite pools.

    This number is given by the dimension of the left null space of the
    stoichiometric matrix.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    stoichiometry, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    # The left null space of S is the null space of S transposed.
    left_nullspace = con_helpers.nullspace(stoichiometry.T)
    return left_nullspace.shape[1]
This number is given by the left null space of the stoichiometric matrix.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. |
def matrix_rank(model):
    """Return the rank of the model's stoichiometric matrix.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    stoichiometry, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    return con_helpers.rank(stoichiometry)
Parameters
----------
model : cobra.Model
The metabolic model under investigation. |
def degrees_of_freedom(model):
    """Return the degrees of freedom, i.e., number of "free variables".

    This is the dimension of the (right) null space of the
    stoichiometric matrix: number of reactions (columns) minus the
    matrix rank.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    """
    stoichiometry, _, _ = con_helpers.stoichiometry_matrix(
        model.metabolites, model.reactions
    )
    num_reactions = stoichiometry.shape[1]
    return num_reactions - matrix_rank(model)
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
This specifically refers to the dimensionality of the (right) null space
of the stoichiometric matrix, as dim(Null(S)) cor... |
def load(self, model):
    """Load all information from an experimental configuration file.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    self
        To allow fluent chaining after loading.

    """
    for loader in (self.load_medium, self.load_essentiality,
                   self.load_growth):
        loader(model)
    # self.load_experiment(config.config.get("growth"), model)
    return self
return self | Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. |
def validate(self):
    """Validate the configuration file against the JSON schema.

    Every individual schema violation is logged as an error before
    ``jsonschema`` raises on the first one.
    """
    config = self.config
    validator = Draft4Validator(self.SCHEMA)
    if not validator.is_valid(config):
        for error in validator.iter_errors(config):
            LOGGER.error(str(error.message))
    # Raises jsonschema.ValidationError for an invalid configuration.
    validator.validate(config)
def load_medium(self, model):
media = self.config.get("medium")
if media is None:
return
definitions = media.get("definitions")
if definitions is None or len(definitions) == 0:
return
path = self.get_path(media, join("data", "experimental", "media... | Load and validate all media. |
def load_essentiality(self, model):
data = self.config.get("essentiality")
if data is None:
return
experiments = data.get("experiments")
if experiments is None or len(experiments) == 0:
return
path = self.get_path(data,
... | Load and validate all data files. |
def load_growth(self, model):
data = self.config.get("growth")
if data is None:
return
experiments = data.get("experiments")
if experiments is None or len(experiments) == 0:
return
path = self.get_path(data,
join("data... | Load and validate all data files. |
def get_path(self, obj, default):
    """Return a relative or absolute path to experimental data.

    Parameters
    ----------
    obj : dict
        A configuration section that may contain a "path" entry.
    default : str
        The path used when the configuration defines none.

    Returns
    -------
    str
        The configured path if absolute, otherwise the path joined
        onto the base directory.

    """
    configured = obj.get("path")
    # NOTE(review): when no path is configured and ``self._base`` is
    # relative, the base is joined twice (once here, once below) —
    # preserved as-is; confirm whether that is intended.
    path = join(self._base, default) if configured is None else configured
    if isabs(path):
        return path
    return join(self._base, path)
def find_components_without_annotation(model, components):
    """Find model components with empty annotation attributes.

    Parameters
    ----------
    model : cobra.Model
        A cobrapy metabolic model.
    components : {"metabolites", "reactions", "genes"}
        A string denoting `cobra.Model` components.

    Returns
    -------
    list
        The components whose annotation is ``None`` or empty.

    """
    # ``not annotation`` covers both ``None`` and an empty mapping.
    return [element for element in getattr(model, components)
            if not element.annotation]
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without an... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.