docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
start uploading the file until upload is complete or error.
This is the main method to used, If you do not care about
state of process.
Args:
params: a dict object describe video info, eg title,
tags, description, category.
all video para... | def upload(self, params={}):
if self.upload_token is not None:
# resume upload
status = self.check()
if status['status'] != 4:
return self.commit()
else:
self.new_slice()
while self.slice_task_id != 0:
... | 957,403 |
Returns Python form of fully qualified name.
Args:
relative_to: If greater 0, the returned path is relative to the first n directories. | def filter_pyfqn(cls, value, relative_to=0):
def collect_packages(element, packages):
parent = element.eContainer()
if parent:
collect_packages(parent, packages)
packages.append(element.name)
packages = []
collect_packages(value, pac... | 957,558 |
Generate model code.
Args:
model: The meta-model to generate code for.
outfolder: Path to the directoty that will contain the generated code.
exclude: List of referenced resources for which code was already generated
(to prevent regeneration). | def generate(self, model, outfolder, *, exclude=None):
with pythonic_names():
super().generate(model, outfolder)
check_dependency = self.with_dependencies and model.eResource
if check_dependency:
if exclude is None:
exclude = set(... | 957,561 |
Returns a MultiFieldSelector based on combining the passed-in
FieldSelector and MultiFieldSelector objects.
args:
``*others=``\ *FieldSelector*\ \|\ *iterable*
Each argument is interpreted as either a FieldSelector, or a
FieldSelector constructor. | def __init__(self, *others):
selectors = list()
heads = collections.defaultdict(set)
for other in others:
if isinstance(other, MultiFieldSelector):
for head, tail in other.heads.iteritems():
heads[head].add(tail)
elif isinstanc... | 958,070 |
Deletes all of the fields at the specified locations.
args:
``obj=``\ *OBJECT*
the object to remove the fields from
``force=``\ *BOOL*
if True, missing attributes do not raise errors. Otherwise,
the first failure raises an exception wit... | def delete(self, obj, force=False):
# TODO: this could be a whole lot more efficient!
if not force:
for fs in self:
try:
fs.get(obj)
except FieldSelectorException:
raise
for fs in self:
try:... | 958,077 |
A wrapper for the `Go stage API`__
.. __: http://api.go.cd/current/#stages
Args:
server (Server): A configured instance of
:class:gocd.server.Server
pipeline_name (str): The name of the pipeline we're working on
stage_name (str): The name of the stage we're wo... | def __init__(self, server, pipeline_name, stage_name, pipeline_counter=None):
self.server = server
self.pipeline_name = pipeline_name
self.pipeline_counter = pipeline_counter
self.stage_name = stage_name | 958,082 |
Generator method which returns the differences from the invocant to
the argument.
args:
``other=``\ *Record*\ \|\ *Anything*
The thing to compare against; the types must match, unless
``duck_type=True`` is passed.
*diff_option*\ =\ *value*
... | def diff_iter(self, other, **kwargs):
from normalize.diff import diff_iter
return diff_iter(self, other, **kwargs) | 958,104 |
Create gene association for class :class:`.GeneDeletionStrategy`.
Return a dict mapping reaction IDs to
:class:`psamm.expression.boolean.Expression` objects,
representing relationships between reactions and related genes. This helper
function should be called when creating :class:`.GeneDeletionStrategy... | def get_gene_associations(model):
for reaction in model.reactions:
assoc = None
if reaction.genes is None:
continue
elif isinstance(reaction.genes, string_types):
assoc = boolean.Expression(reaction.genes)
else:
variables = [boolean.Variable(... | 958,183 |
The common visitor API used by all three visitor implementations.
args:
``visitor=``\ *Visitor*
Visitor options instance: contains the callbacks to use to
implement the visiting, as well as traversal & filtering
options.
``value=``\ *Obj... | def map(cls, visitor, value, value_type):
unpacked = visitor.unpack(value, value_type, visitor)
if unpacked == cls.StopVisiting or isinstance(
unpacked, cls.StopVisiting
):
return unpacked.return_value
if isinstance(unpacked, tuple):
props, ... | 958,238 |
Minimize flux of all reactions while keeping certain fluxes fixed.
The fixed reactions are given in a dictionary as reaction id
to value mapping. The weighted L1-norm of the fluxes is minimized.
Args:
model: MetabolicModel to solve.
fixed: dict of additional lower bounds on reaction fluxes... | def flux_minimization(model, fixed, solver, weights={}):
fba = FluxBalanceProblem(model, solver)
for reaction_id, value in iteritems(fixed):
flux = fba.get_flux_var(reaction_id)
fba.prob.add_linear_constraints(flux >= value)
fba.minimize_l1()
return ((reaction_id, fba.get_flux(r... | 958,250 |
Find a random flux solution on the boundary of the solution space.
The reactions in the threshold dictionary are constrained with the
associated lower bound.
Args:
model: MetabolicModel to solve.
threshold: dict of additional lower bounds on reaction fluxes.
tfba: If True enable th... | def flux_randomization(model, threshold, tfba, solver):
optimize = {}
for reaction_id in model.reactions:
if model.is_reversible(reaction_id):
optimize[reaction_id] = 2*random.random() - 1.0
else:
optimize[reaction_id] = random.random()
fba = _get_fba_problem(m... | 958,251 |
Check whether the ID is valid.
First check if the ID is missing, and then check if it is a qualified
string type, finally check if the string is empty. For all checks, it
would raise a ParseError with the corresponding message.
Args:
entity: a string type object to be checked.
entity_t... | def _check_id(entity, entity_type):
if entity is None:
raise ParseError('{} ID missing'.format(entity_type))
elif not isinstance(entity, string_types):
msg = '{} ID must be a string, id was {}.'.format(entity_type, entity)
if isinstance(entity, bool):
msg += (' You may ... | 958,293 |
Convert compartment entry to YAML dict.
Args:
compartment: :class:`psamm.datasource.entry.CompartmentEntry`.
adjacencies: Sequence of IDs or a single ID of adjacent
compartments (or None). | def convert_compartment_entry(self, compartment, adjacencies):
d = OrderedDict()
d['id'] = compartment.id
if adjacencies is not None:
d['adjacent_to'] = adjacencies
order = {key: i for i, key in enumerate(['name'])}
prop_keys = set(compartment.properties)
... | 958,333 |
Write iterable of entries as YAML object to stream.
Args:
stream: File-like object.
entries: Iterable of entries.
converter: Conversion function from entry to YAML object.
properties: Set of compartment properties to output (or None to
output all)... | def _write_entries(self, stream, entries, converter, properties=None):
def iter_entries():
for c in entries:
entry = converter(c)
if entry is None:
continue
if properties is not None:
entry = OrderedDict... | 958,336 |
Write iterable of compartments as YAML object to stream.
Args:
stream: File-like object.
compartments: Iterable of compartment entries.
adjacencies: Dictionary mapping IDs to adjacent compartment IDs.
properties: Set of compartment properties to output (or None t... | def write_compartments(self, stream, compartments, adjacencies,
properties=None):
def convert(entry):
return self.convert_compartment_entry(
entry, adjacencies.get(entry.id))
self._write_entries(stream, compartments, convert, properties) | 958,337 |
Write iterable of compounds as YAML object to stream.
Args:
stream: File-like object.
compounds: Iterable of compound entries.
properties: Set of compound properties to output (or None to output
all). | def write_compounds(self, stream, compounds, properties=None):
self._write_entries(
stream, compounds, self.convert_compound_entry, properties) | 958,338 |
Write iterable of reactions as YAML object to stream.
Args:
stream: File-like object.
compounds: Iterable of reaction entries.
properties: Set of reaction properties to output (or None to output
all). | def write_reactions(self, stream, reactions, properties=None):
self._write_entries(
stream, reactions, self.convert_reaction_entry, properties) | 958,339 |
Used to make a new Collection type, without that type having to be
defined explicitly. Generates a new type name using the item type and a
'suffix' Collection class property.
args:
``of=``\ *Record type*
The type of values of the collection
``coll=``\ *Collection sub-class*
... | def _make_generic(of, coll):
assert(issubclass(coll, Collection))
key = (coll.__name__, "%s.%s" % (of.__module__, of.__name__))
if key in GENERIC_TYPES:
if GENERIC_TYPES[key].itemtype != of:
raise exc.PropertyNotUnique(key=key)
else:
# oh, we get to name it? Goodie!
... | 958,497 |
Default collection constructor.
args:
``values=``\ *iterable*
Specify the initial contents of the collection. It will be
converted to the correct type using :py:meth:`coll_to_tuples`
and :py:meth:`tuples_to_coll`
``attribute=``\ *VALUE*... | def __init__(self, values=None, **kwargs):
self._values = type(self).tuples_to_coll(
type(self).coll_to_tuples(values)
)
super(Collection, self).__init__(**kwargs) | 958,498 |
Add all reactions from database that occur in given compartments.
Args:
model: :class:`psamm.metabolicmodel.MetabolicModel`. | def add_all_database_reactions(model, compartments):
added = set()
for rxnid in model.database.reactions:
reaction = model.database.get_reaction(rxnid)
if all(compound.compartment in compartments
for compound, _ in reaction.compounds):
if not model.has_reaction(r... | 958,583 |
Add all exchange reactions to database and to model.
Args:
model: :class:`psamm.metabolicmodel.MetabolicModel`. | def add_all_exchange_reactions(model, compartment, allow_duplicates=False):
all_reactions = {}
if not allow_duplicates:
# TODO: Avoid adding reactions that already exist in the database.
# This should be integrated in the database.
for rxnid in model.database.reactions:
... | 958,584 |
Add all transport reactions to database and to model.
Add transport reactions for all boundaries. Boundaries are defined
by pairs (2-tuples) of compartment IDs. Transport reactions are
added for all compounds in the model, not just for compounds in the
two boundary compartments.
Args:
mode... | def add_all_transport_reactions(model, boundaries, allow_duplicates=False):
all_reactions = {}
if not allow_duplicates:
# TODO: Avoid adding reactions that already exist in the database.
# This should be integrated in the database.
for rxnid in model.database.reactions:
... | 958,585 |
Returns an instance of :class:`Stage`
Args:
pipeline_name (str): Name of the pipeline the stage belongs to
stage_name (str): Name of the stage to act on
pipeline_counter (int): The pipeline instance the stage is for.
Returns:
Stage: an instantiated :class:... | def stage(self, pipeline_name, stage_name, pipeline_counter=None):
return Stage(self, pipeline_name, stage_name, pipeline_counter=pipeline_counter) | 958,598 |
Return unique signature object for :class:`Reaction`.
Signature objects are hashable, and compare equal only if the reactions
are considered the same according to the specified rules.
Args:
direction: Include reaction directionality when considering equality.
stoichiometry: Include stoichi... | def reaction_signature(eq, direction=False, stoichiometry=False):
def compounds_sig(compounds):
if stoichiometry:
return tuple(sorted(compounds))
else:
return tuple(sorted(compound for compound, _ in compounds))
left = compounds_sig(eq.left)
right = compounds_si... | 958,695 |
Calculate the overall charge for the specified reaction.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_charge: a map from each compound to charge values. | def reaction_charge(reaction, compound_charge):
charge_sum = 0.0
for compound, value in reaction.compounds:
charge = compound_charge.get(compound.name, float('nan'))
charge_sum += charge * float(value)
return charge_sum | 958,698 |
Calculate the overall charge for all reactions in the model.
Yield (reaction, charge) pairs.
Args:
model: :class:`psamm.datasource.native.NativeModel`. | def charge_balance(model):
compound_charge = {}
for compound in model.compounds:
if compound.charge is not None:
compound_charge[compound.id] = compound.charge
for reaction in model.reactions:
charge = reaction_charge(reaction.equation, compound_charge)
yield react... | 958,699 |
Calculate formula compositions for both sides of the specified reaction.
If the compounds in the reaction all have formula, then calculate and
return the chemical compositions for both sides, otherwise return `None`.
Args:
reaction: :class:`psamm.reaction.Reaction`.
compound_formula: a map... | def reaction_formula(reaction, compound_formula):
def multiply_formula(compound_list):
for compound, count in compound_list:
yield count * compound_formula[compound.name]
for compound, _ in reaction.compounds:
if compound.name not in compound_formula:
return None
... | 958,700 |
Calculate formula compositions for each reaction.
Call :func:`reaction_formula` for each reaction.
Yield (reaction, result) pairs, where result has two formula compositions
or `None`.
Args:
model: :class:`psamm.datasource.native.NativeModel`. | def formula_balance(model):
# Mapping from compound id to formula
compound_formula = {}
for compound in model.compounds:
if compound.formula is not None:
try:
f = Formula.parse(compound.formula).flattened()
compound_formula[compound.id] = f
... | 958,701 |
Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsey returns the latest pipeline in... | def instance(self, counter=None):
if not counter:
history = self.history()
if not history:
return history
else:
return Response._from_json(history['pipelines'][0])
return self._get('/instance/{counter:d}'.format(counter=counte... | 958,730 |
Helper to instantiate an :class:`gocd.api.artifact.Artifact` object
Args:
counter (int): The pipeline counter to get the artifact for
stage: Stage name
job: Job name
stage_counter: Defaults to 1
Returns:
Artifact: :class:`gocd.api.artifact.Artifact` ob... | def artifact(self, counter, stage, job, stage_counter=1):
return Artifact(self.server, self.name, counter, stage, job, stage_counter) | 958,732 |
Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipel... | def console_output(self, instance=None):
if instance is None:
instance = self.instance()
for stage in instance['stages']:
for job in stage['jobs']:
if job['result'] not in self.final_results:
continue
artifact = self.... | 958,733 |
Helper to instantiate a :class:`gocd.api.stage.Stage` object
Args:
name: The name of the stage
pipeline_counter:
Returns: | def stage(self, name, pipeline_counter=None):
return Stage(
self.server,
pipeline_name=self.name,
stage_name=name,
pipeline_counter=pipeline_counter,
) | 958,734 |
Return a constant indicating the type of coupling.
Depending on the type of coupling, one of the constants from
:class:`.CouplingClass` is returned.
Args:
coupling: Tuple of minimum and maximum flux ratio | def classify_coupling(coupling):
lower, upper = coupling
if lower is None and upper is None:
return CouplingClass.Uncoupled
elif lower is None or upper is None:
return CouplingClass.DirectionalReverse
elif lower == 0.0 and upper == 0.0:
return CouplingClass.Inconsistent
... | 958,756 |
Convert raw SBML model to extended model.
Args:
model: :class:`NativeModel` obtained from :class:`SBMLReader`. | def convert_sbml_model(model):
biomass_reactions = set()
for reaction in model.reactions:
# Extract limits
if reaction.id not in model.limits:
lower, upper = parse_flux_bounds(reaction)
if lower is not None or upper is not None:
model.limits[reaction.... | 958,768 |
Yield key, value pairs parsed from the XHTML notes section.
Each key, value pair must be defined in its own text block, e.g.
``<p>key: value</p><p>key2: value2</p>``. The key and value must be
separated by a colon. Whitespace is stripped from both key and value, and
quotes are removed from values if pr... | def parse_xhtml_notes(entry):
for note in entry.xml_notes.itertext():
m = re.match(r'^([^:]+):(.+)$', note)
if m:
key, value = m.groups()
key = key.strip().lower().replace(' ', '_')
value = value.strip()
m = re.match(r'^"(.*)"$', value)
... | 958,774 |
Return species properties defined in the XHTML notes.
Older SBML models often define additional properties in the XHTML notes
section because structured methods for defining properties had not been
developed. This will try to parse the following properties: ``PUBCHEM ID``,
``CHEBI ID``, ``FORMULA``, ``... | def parse_xhtml_species_notes(entry):
properties = {}
if entry.xml_notes is not None:
cobra_notes = dict(parse_xhtml_notes(entry))
for key in ('pubchem_id', 'chebi_id'):
if key in cobra_notes:
properties[key] = cobra_notes[key]
if 'formula' in cobra_not... | 958,775 |
Return reaction properties defined in the XHTML notes.
Older SBML models often define additional properties in the XHTML notes
section because structured methods for defining properties had not been
developed. This will try to parse the following properties: ``SUBSYSTEM``,
``GENE ASSOCIATION``, ``EC NU... | def parse_xhtml_reaction_notes(entry):
properties = {}
if entry.xml_notes is not None:
cobra_notes = dict(parse_xhtml_notes(entry))
if 'subsystem' in cobra_notes:
properties['subsystem'] = cobra_notes['subsystem']
if 'gene_association' in cobra_notes:
prope... | 958,776 |
Return objective value for reaction entry.
Detect objectives that are specified using the non-standardized
kinetic law parameters which are used by many pre-FBC SBML models. The
objective coefficient is returned for the given reaction, or None if
undefined.
Args:
entry: :class:`SBMLReactio... | def parse_objective_coefficient(entry):
for parameter in entry.kinetic_law_reaction_parameters:
pid, name, value, units = parameter
if (pid == 'OBJECTIVE_COEFFICIENT' or
name == 'OBJECTIVE_COEFFICIENT'):
return value
return None | 958,777 |
Return flux bounds for reaction entry.
Detect flux bounds that are specified using the non-standardized
kinetic law parameters which are used by many pre-FBC SBML models. The
flux bounds are returned as a pair of lower, upper bounds. The returned
bound is None if undefined.
Args:
entry: :c... | def parse_flux_bounds(entry):
lower_bound = None
upper_bound = None
for parameter in entry.kinetic_law_reaction_parameters:
pid, name, value, units = parameter
if pid == 'UPPER_BOUND' or name == 'UPPER_BOUND':
upper_bound = value
elif pid == 'LOWER_BOUND' or name == ... | 958,778 |
Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`. | def detect_extracellular_compartment(model):
extracellular_key = Counter()
for reaction in model.reactions:
equation = reaction.equation
if equation is None:
continue
if len(equation.compounds) == 1:
compound, _ = equation.compounds[0]
compartme... | 958,779 |
Convert exchange reactions in model to exchange compounds.
Only exchange reactions in the extracellular compartment are converted.
The extracelluar compartment must be defined for the model.
Args:
model: :class:`NativeModel`. | def convert_exchange_to_compounds(model):
# Build set of exchange reactions
exchanges = set()
for reaction in model.reactions:
equation = reaction.properties.get('equation')
if equation is None:
continue
if len(equation.compounds) != 1:
# Provide warning... | 958,780 |
Merge equivalent compounds in various compartments.
Tries to detect and merge compound entries that represent the same
compound in different compartments. The entries are only merged if all
properties are equivalent. Compound entries must have an ID with a suffix
of an underscore followed by the compar... | def merge_equivalent_compounds(model):
def dicts_are_compatible(d1, d2):
return all(key not in d1 or key not in d2 or d1[key] == d2[key]
for key in set(d1) | set(d2))
compound_compartment = {}
inelegible = set()
for reaction in model.reactions:
equation = reactio... | 958,781 |
Write a given model to file.
Args:
file: File-like object open for writing.
model: Instance of :class:`NativeModel` to write.
pretty: Whether to format the XML output for readability. | def write_model(self, file, model, pretty=False):
ET.register_namespace('mathml', MATHML_NS)
ET.register_namespace('xhtml', XHTML_NS)
ET.register_namespace('fbc', FBC_V2)
# Load compound information
compound_name = {}
compound_properties = {}
for compoun... | 958,807 |
JSON marshall in function: a 'visitor' function which looks for JSON
types/hints on types being converted to, but does not require them.
Args:
``record_type=``\ *TYPE*
Record type to convert data to
``json_struct=``\ *DICT|LIST*
a loaded (via ``json.loads``) data struct... | def from_json(record_type, json_struct):
if issubclass(record_type, JsonRecord):
return record_type(json_struct)
elif issubclass(record_type, Record):
# do what the default JsonRecord __init__ does
init_kwargs = json_to_initkwargs(record_type, json_struct)
instance = record... | 958,811 |
Build a new JsonRecord sub-class.
Args:
``json_data=``\ *LIST|other*
JSON data (string or already ``json.loads``'d)
``**kwargs``
Other initializer attributes, for lists with extra
attributes (eg, paging information) | def __init__(self, json_data=None, **kwargs):
if isinstance(json_data, OhPickle):
return
if isinstance(json_data, basestring):
json_data = json.loads(json_data)
if json_data is not None:
kwargs = type(self).json_to_initkwargs(json_data, kwargs)
... | 958,818 |
A wrapper for the `Go pluggable SCM API`__
.. __: https://api.go.cd/current/#scms
Args:
server (Server): A configured instance of
:class:gocd.server.Server
name (str): The name of the SCM material | def __init__(self, server, name=""):
self.server = server
self.name = name | 958,847 |
Remove old constraints and then solve the current problem.
Args:
sense: Minimize or maximize the objective.
(:class:`.lp.ObjectiveSense)
Returns:
The Result object for the solved LP problem | def _solve(self, sense=None):
# Remove the constraints from the last run
while len(self._remove_constr) > 0:
self._remove_constr.pop().delete()
try:
return self._prob.solve(sense=sense)
except lp.SolverError as e:
raise_from(MOMAError(text_ty... | 958,863 |
Solve the wild type problem using FBA.
Args:
objective: The objective reaction to be maximized.
Returns:
The LP Result object for the solved FBA problem. | def solve_fba(self, objective):
self._prob.set_objective(self._v_wt[objective])
return self._solve(lp.ObjectiveSense.Maximize) | 958,864 |
Return a dictionary of all the fluxes solved by FBA.
Dictionary of fluxes is used in :meth:`.lin_moma` and :meth:`.moma`
to minimize changes in the flux distributions following model
perturbation.
Args:
objective: The objective reaction that is maximized.
Returns:
... | def get_fba_flux(self, objective):
flux_result = self.solve_fba(objective)
fba_fluxes = {}
# Place all the flux values in a dictionary
for key in self._model.reactions:
fba_fluxes[key] = flux_result.get_value(self._v_wt[key])
return fba_fluxes | 958,865 |
Find the FBA solution that minimizes all the flux values.
Maximize the objective flux then minimize all other fluxes
while keeping the objective flux at the maximum.
Args:
objective: The objective reaction that is maximized.
Returns:
A dictionary of all the rea... | def get_minimal_fba_flux(self, objective):
# Define constraints
vs_wt = self._v_wt.set(self._model.reactions)
zs = self._z.set(self._model.reactions)
wt_obj_flux = self.get_fba_obj_flux(objective)
with self.constraints() as constr:
constr.add(
... | 958,866 |
Minimize the redistribution of fluxes using Euclidean distance.
Minimizing the redistribution of fluxes using a quadratic objective
function. The distance is minimized by minimizing the sum of
(wild type - knockout)^2.
Args:
wt_fluxes: Dictionary of all the wild type fluxes... | def moma(self, wt_fluxes):
reactions = set(self._adjustment_reactions())
v = self._v
obj_expr = 0
for f_reaction, f_value in iteritems(wt_fluxes):
if f_reaction in reactions:
# Minimize the Euclidean distance between the two vectors
o... | 958,870 |
Check consistency of model reactions.
Yield all reactions in the model that are not part of the consistent
subset.
Args:
model: :class:`MetabolicModel` to solve.
epsilon: Flux threshold value.
solver: LP solver instance to use. | def fastcc(model, epsilon, solver):
reaction_set = set(model.reactions)
subset = set(reaction_id for reaction_id in reaction_set
if model.limits[reaction_id].lower >= 0)
logger.info('Checking {} irreversible reactions...'.format(len(subset)))
logger.debug('|J| = {}, J = {}'.format... | 958,937 |
Quickly check whether model is consistent
Return true if the model is consistent. If it is only necessary to know
whether a model is consistent, this function is fast as it will return
the result as soon as it finds a single inconsistent reaction.
Args:
model: :class:`MetabolicModel` to solve.... | def fastcc_is_consistent(model, epsilon, solver):
for reaction in fastcc(model, epsilon, solver):
return False
return True | 958,938 |
Return consistent subset of model.
The largest consistent subset is returned as
a set of reaction names.
Args:
model: :class:`MetabolicModel` to solve.
epsilon: Flux threshold value.
solver: LP solver instance to use.
Returns:
Set of reaction IDs in the consistent reac... | def fastcc_consistent_subset(model, epsilon, solver):
reaction_set = set(model.reactions)
return reaction_set.difference(fastcc(model, epsilon, solver)) | 958,939 |
Match compounds greedily based on score function.
Args:
reaction: Reaction equation :class:`psamm.reaction.Reaction`.
compound_formula: Dictionary mapping compound IDs to
:class:`psamm.formula.Formula`. Formulas must be flattened.
score_func: Function that takes two :class:`_Com... | def _match_greedily(reaction, compound_formula, score_func):
uninstantiated_left, uninstantiated_right = _reaction_to_dicts(reaction)
def compound_instances(uninstantiated):
instances = []
for compound, value in iteritems(uninstantiated):
if value > 0:
f = compo... | 958,950 |
Instantiate a new webdriver class
Args:
outputdir: The path to the directory to use.
os_name: Valid options: ['windows', 'linux', 'mac']
os_bits: Valid options: ['32', '64'] | def __init__(self, outputdir, os_name, os_bits):
if type(self) == Basedriver:
raise Exception('Basedriver cannot be instantiated')
self.outputdir = outputdir
self.os_name = os_name
self.os_bits = os_bits | 959,085 |
class constructor
Args:
kwargs: widget options | def __init__(self, **kwargs):
super(Aladin, self).__init__(**kwargs)
# trigger the handle_aladin_event function when the send function is called on the js-side
# see: http://jupyter-notebook.readthedocs.io/en/latest/comms.html
self.on_msg(self.handle_aladin_event) | 959,395 |
load a VOTable table from an url and load its data into the widget
Args:
votable_URL: string url
votable_options: dictionary object | def add_catalog_from_URL(self, votable_URL, votable_options={}):
self.votable_URL= votable_URL
self.votable_options= votable_options
self.votable_from_URL_flag= not self.votable_from_URL_flag | 959,396 |
load a MOC from a URL and display it in Aladin Lite widget
Arguments:
moc_URL: string url
moc_options: dictionary object | def add_moc_from_URL(self, moc_URL, moc_options = {}):
self.moc_URL = moc_URL
self.moc_options = moc_options
self.moc_from_URL_flag = not self.moc_from_URL_flag | 959,397 |
load a MOC from a dict object and display it in Aladin Lite widget
Arguments:
moc_dict: the dict containing the MOC cells. Key are the HEALPix orders,
values are the pixel indexes, eg: {"1":[1,2,4], "2":[12,13,14,21,23,25]}
moc_options: dictionary object | def add_moc_from_dict(self, moc_dict, moc_options = {}):
self.moc_dict = moc_dict
self.moc_options = moc_options
self.moc_from_dict_flag = not self.moc_from_dict_flag | 959,398 |
load a VOTable -already accessible on the python side- into the widget
Args:
table: votable object | def add_table(self, table):
# theses library must be installed, and are used in votable operations
# http://www.astropy.org/
import astropy
table_array = table.__array__()
self.table_keys= table.keys()
table_columns= []
for i in range(0,len(tabl... | 959,399 |
add a listener to the widget
Args:
listener_type: string that can either be 'objectHovered' or 'objClicked'
callback: python function | def add_listener(self, listener_type, callback):
self.listener_type= listener_type
if listener_type == 'objectHovered':
self.listener_callback_source_hover= callback
elif listener_type == 'objectClicked':
self.listener_callback_source_click= callback
elif... | 959,400 |
Parse the ELF header in ``data`` and populate the properties.
Args:
data(bytes): The ELF header. | def _parse_header(self, data):
(magic, word_size, byte_order, version, osabi, abi_version, _), data = \
unpack('4sBBBBB7s', data[:16]), data[16:]
assert magic == self._ELF_MAGIC, 'Missing ELF magic'
assert word_size in (1, 2), 'Invalid word size'
assert byte_order ... | 959,723 |
def parse_file(self, f):
    """Parse an ELF file and fill the class' properties.

    Arguments:
        f(file or str): The (path to) the ELF file to read.
    """
    # Accept either a path (opened in binary mode) or an open file object.
    self.f = open(f, 'rb') if type(f) is str else f
    # 64 bytes covers the full 64-bit ELF header.
    self._parse_header(self.f.read(64))
def get_section_header(self, section):
    """Get a specific section header by index or name.

    Args:
        section(int or str): The index or name of the section header to
            return.

    Returns:
        :class:`~ELF.SectionHeader`: The section header.

    Raises:
        KeyError: The requested section header does not exist.
    """
    self._ensure_section_headers_loaded()
    # Integers address sections by position, anything else by name.
    table = (
        self._section_headers_by_index
        if type(section) is int
        else self._section_headers_by_name
    )
    return table[section]
def get_symbol(self, symbol):
    """Get a specific symbol by index or name.

    Args:
        symbol(int or str): The index or name of the symbol to return.

    Returns:
        ELF.Symbol: The symbol.

    Raises:
        KeyError: The requested symbol does not exist.
    """
    self._ensure_symbols_loaded()
    # Integers address symbols by position, anything else by name.
    table = (
        self._symbols_by_index
        if type(symbol) is int
        else self._symbols_by_name
    )
    return table[symbol]
Load a .pyc file from a file-like object.
Arguments:
fp(file): The file-like object to read.
Returns:
PycFile: The parsed representation of the .pyc file. | def pyc_load(fp):
magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)
magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)
internals = MAGIC_MAP.get(magic_1)
if internals is None:
raise ValueError('Invalid or unknown magic (%d).' % magic_1)
if magic_2 != 2573:
raise ValueError('Inva... | 959,768 |
Disassemble python bytecode into a series of :class:`Op` and
:class:`Label` instances.
Arguments:
code(bytes): The bytecode (a code object's ``co_code`` property). You
can also provide a function.
origin(dict): The opcode specification of the python version that
generate... | def disassemble(code, origin=None):
if inspect.isfunction(code):
code = six.get_function_code(code).co_code
origin = get_py_internals(origin)
opname = origin['opname']
hasjrel = origin['hasjrel']
hasjabs = origin['hasjabs']
hasjump = set(hasjrel) | set(hasjabs)
wordcode = ori... | 959,816 |
Assemble a set of :class:`Op` and :class:`Label` instance back into
bytecode.
Arguments:
ops(list): A list of opcodes and labels (as returned by
:func:`disassemble`).
target: The opcode specification of the targeted python
version. If this is ``None`` the specification o... | def assemble(ops, target=None):
target = get_py_internals(target)
opmap = target['opmap']
hasjrel = target['hasjrel']
hasjabs = target['hasjabs']
hasjump = set(hasjrel) | set(hasjabs)
have_argument = target['have_argument']
extended_arg = target['extended_arg']
wordcode = target[... | 959,817 |
def from_function(cls, f, *args, **kwargs):
    """Create a new instance from a function.

    Gets the code object from the function and passes it and any other
    specified parameters to :meth:`from_code`.

    Arguments:
        f(function): The function to get the code object from.

    Returns:
        CodeObject: A new :class:`CodeObject` instance.
    """
    code = six.get_function_code(f)
    return cls.from_code(code, *args, **kwargs)
def annotate_op(self, op):
    """Takes a bytecode operation (:class:`Op`) and annotates it using the
    data contained in this code object.

    Arguments:
        op(Op): An :class:`Op` instance.

    Returns:
        AnnotatedOp: An annotated bytecode operation.
    """
    # Labels carry no operation data, so they pass through untouched.
    if isinstance(op, Label):
        return op
    return AnnotatedOp(self, op.name, op.arg)
def disassemble(self, annotate=False, blocks=False):
    """Disassemble the bytecode of this code object into a series of
    opcodes and labels. Can also annotate the opcodes and group
    the opcodes into blocks based on the labels.

    Arguments:
        annotate(bool): Whether to annotate the operations.
        blocks(bool): Whether to group the operations into blocks.

    Returns:
        list: The disassembled operations, optionally annotated and/or
            grouped into blocks.
    """
    # Calls the module-level disassemble() helper, not this method.
    ops = disassemble(self.co_code, self.internals)
    if annotate:
        ops = [self.annotate_op(operation) for operation in ops]
    return blocks_from_ops(ops) if blocks else ops
Read the index, and load the document list from it
Arguments:
callback --- called during the indexation (may be called *often*).
step : DocSearch.INDEX_STEP_READING or
DocSearch.INDEX_STEP_SORTING
progression : how many elements done yet
... | def reload_index(self, progress_cb=dummy_progress_cb):
nb_results = self.index.start_reload_index()
progress = 0
while self.index.continue_reload_index():
progress_cb(progress, nb_results, self.INDEX_STEP_LOADING)
progress += 1
progress_cb(1, 1, self.INDE... | 959,860 |
def create_label(self, label, doc=None, callback=dummy_progress_cb):
    """Create a new label.

    Arguments:
        doc --- first document on which the label must be added (required
            for now)
    """
    if not doc:
        return
    # Clone the document so the object handed to the index is serializable.
    serializable_doc = doc.clone()
    return self.index.create_label(label, doc=serializable_doc)
Prepare a capstone disassembler instance for a given target and syntax.
Args:
syntax(AsmSyntax): The assembler syntax (Intel or AT&T).
target(~pwnypack.target.Target): The target to create a disassembler
instance for. The global target is used if this argument is
``None``.
... | def prepare_capstone(syntax=AsmSyntax.att, target=None):
if not HAVE_CAPSTONE:
raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')
if target is None:
target = pwnypack.target.target
if target.arch == pwnypack.target.Target.Arch.x86:
... | 959,886 |
Given an element of a de Bruijn sequence, find its index in that sequence.
Args:
key(str): The piece of the de Bruijn sequence to find.
width(int): The width of each element in the sequence.
Returns:
int: The index of ``key`` in the de Bruijn sequence. | def cycle_find(key, width=4):
key_len = len(key)
buf = ''
it = deBruijn(width, 26)
for i in range(key_len):
buf += chr(ord('A') + next(it))
if buf == key:
return 0
for i, c in enumerate(it):
buf = buf[1:] + chr(ord('A') + c)
if buf == key:
re... | 959,903 |
Returns all the documents matching the given keywords
Arguments:
sentence --- a sentenced query
Returns:
An array of document (doc objects) | def find_documents(self, sentence, limit=None, must_sort=True,
search_type='fuzzy'):
sentence = sentence.strip()
sentence = strip_accents(sentence)
if sentence == u"":
return self.get_all_docs()
result_list_list = []
total_results = 0... | 959,934 |
Search all possible suggestions. Suggestions returned always have at
least one document matching.
Arguments:
sentence --- keywords (single strings) for which we want
suggestions
Return:
An array of sets of keywords. Each set of keywords (-> one string)
... | def find_suggestions(self, sentence):
if not isinstance(sentence, str):
sentence = str(sentence)
keywords = sentence.split(" ")
query_parser = self.search_param_list['strict'][0]['query_parser']
base_search = u" ".join(keywords).strip()
final_suggestions =... | 959,935 |
Create a new label
Arguments:
doc --- first document on which the label must be added (required
for now) | def create_label(self, label, doc=None):
label = copy.copy(label)
assert(label not in self.labels.values())
self.labels[label.name] = label
self.label_guesser.load(label.name)
# TODO(Jflesch): Should train with previous documents
if doc:
doc.add_label... | 959,936 |
def add_label(self, doc, label, update_index=True):
    """Add a label on a document.

    Arguments:
        label --- The new label (see labels.Label)
        doc --- The first document on which this label has been added
        update_index --- Whether to reindex and commit the document.
    """
    # Work on a copy so the caller's label object is never shared.
    label = copy.copy(label)
    assert label in self.labels.values()
    doc.add_label(label)
    if not update_index:
        return
    self.upd_doc(doc)
    self.commit()
Return a suitable pickle protocol version for a given target.
Arguments:
target: The internals description of the targeted python
version. If this is ``None`` the specification of the currently
running python version will be used.
protocol(None or int): The requested protoco... | def get_protocol_version(protocol=None, target=None):
target = get_py_internals(target)
if protocol is None:
protocol = target['pickle_default_protocol']
if protocol > cPickle.HIGHEST_PROTOCOL:
warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.H... | 960,001 |
Very crude inter-python version opcode translator. Raises SyntaxError when
the opcode doesn't exist in the destination opmap. Used to transcribe
python code objects between python versions.
Arguments:
code_obj(pwnypack.bytecode.CodeObject): The code object representation
to translate.
... | def translate_opcodes(code_obj, target):
target = get_py_internals(target)
src_ops = code_obj.disassemble()
dst_opmap = target['opmap']
dst_ops = []
op_iter = enumerate(src_ops)
for i, op in op_iter:
if isinstance(op, pwnypack.bytecode.Label):
dst_ops.append(op)
... | 960,003 |
def assume(self, other):
    """Assume the identity of another target. This can be useful to make the
    global target assume the identity of an ELF executable.

    Arguments:
        other(:class:`Target`): The target whose identity to assume.

    Example:
        >>> from pwny import *
        >>> target.assume(ELF('my-executable'))
    """
    # Copy the four identity attributes wholesale.
    for attr in ('_arch', '_bits', '_endian', '_mode'):
        setattr(self, attr, getattr(other, attr))
Allocate a piece of data that will be included in the shellcode body.
Arguments:
value(...): The value to add to the shellcode. Can be bytes or
string type.
Returns:
~pwnypack.types.Offset: The offset used to address the data. | def alloc_data(self, value):
if isinstance(value, six.binary_type):
return self._alloc_data(value)
elif isinstance(value, six.text_type):
return self._alloc_data(value.encode('utf-8') + b'\0')
else:
raise TypeError('No idea how to encode %s' % repr(v... | 960,448 |
def alloc_buffer(self, length):
    """Allocate a buffer (a range of uninitialized memory).

    Arguments:
        length(int): The length of the buffer to allocate.

    Returns:
        ~pwnypack.types.Buffer: The object used to address this buffer.
    """
    # The new buffer starts right after all previously allocated data
    # blobs and buffers.
    data_size = sum(len(v) for v in six.iterkeys(self.data))
    buffers_size = sum(b.length for b in self.buffers)
    buf = Buffer(data_size + buffers_size, length)
    self.buffers.append(buf)
    return buf
Add a value to a register. The value can be another :class:`Register`,
an :class:`Offset`, a :class:`Buffer`, an integer or ``None``.
Arguments:
reg(pwnypack.shellcode.types.Register): The register to add the
value to.
value: The value to add to the register.
... | def reg_add(self, reg, value):
if value is None:
return []
elif isinstance(value, Register):
return self.reg_add_reg(reg, value)
elif isinstance(value, (Buffer, six.integer_types)):
if isinstance(reg, Buffer):
value = sum(len(v) for... | 960,452 |
Translate a list of operations into its assembler source.
Arguments:
ops(list): A list of shellcode operations.
Returns:
str: The assembler source code that implements the shellcode. | def compile(self, ops):
def _compile():
code = []
for op in ops:
if isinstance(op, SyscallInvoke):
code.extend(self.syscall(op))
elif isinstance(op, LoadRegister):
code.extend(self.reg_load(op.register, op... | 960,454 |
def assemble(self, ops):
    """Assemble a list of operations into executable code.

    Arguments:
        ops(list): A list of shellcode operations.

    Returns:
        bytes: The executable code that implements the shellcode.
    """
    # First translate the operations to assembler source, then assemble it.
    source = self.compile(ops)
    return pwnypack.asm.asm(source, target=self.target)
Read *n* bytes from the subprocess' output channel.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes of output.
Raises:
EOFError: If the process exited. | def read(self, n):
d = b''
while n:
try:
block = self._process.stdout.read(n)
except ValueError:
block = None
if not block:
self._process.poll()
raise EOFError('Process ended')
d += ... | 960,466 |
def write(self, data):
    """Write data to the subprocess' input channel.

    Args:
        data(bytes): The data to write.

    Raises:
        EOFError: If the process exited.
    """
    proc = self._process
    # poll() refreshes returncode; a non-None value means the process died.
    proc.poll()
    if proc.returncode is not None:
        raise EOFError('Process ended')
    proc.stdin.write(data)
Receive *n* bytes from the socket.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes read from the socket.
Raises:
EOFError: If the socket was closed. | def read(self, n):
d = b''
while n:
try:
block = self._socket.recv(n)
except socket.error:
block = None
if not block:
raise EOFError('Socket closed')
d += block
n -= len(block)
r... | 960,468 |
def write(self, data):
    """Send data to the socket.

    Args:
        data(bytes): The data to send.

    Raises:
        EOFError: If the socket was closed.
    """
    remaining = data
    while remaining:
        try:
            sent = self._socket.send(remaining)
        except socket.error:
            sent = None
        # A failed or zero-byte send means the connection is gone.
        if not sent:
            raise EOFError('Socket closed')
        # Drop the part that was sent; send() may accept only a prefix.
        remaining = remaining[sent:]
def read(self, n, echo=None):
    """Read *n* bytes from the channel.

    Args:
        n(int): The number of bytes to read from the channel.
        echo(bool): Whether to write the read data to stdout.

    Returns:
        bytes: *n* bytes of data.

    Raises:
        EOFError: If the channel was closed.
    """
    data = self.channel.read(n)
    # Fall back to the instance-wide echo setting when not overridden.
    do_echo = self.echo if echo is None else echo
    if do_echo:
        sys.stdout.write(data.decode('latin1'))
        sys.stdout.flush()
    return data
def read_eof(self, echo=None):
    """Read until the channel is closed.

    Args:
        echo(bool): Whether to write the read data to stdout.

    Returns:
        bytes: The read data.
    """
    collected = b''
    # Pull one byte at a time; EOFError signals the channel closed.
    while True:
        try:
            collected += self.read(1, echo)
        except EOFError:
            return collected
def read_until(self, s, echo=None):
    """Read until a certain string is encountered.

    Args:
        s(bytes): The string to wait for.
        echo(bool): Whether to write the read data to stdout.

    Returns:
        bytes: The data up to and including *s*.

    Raises:
        EOFError: If the channel was closed.
    """
    # Start with len(s) bytes, then pull one byte at a time until the
    # buffer ends with the marker.
    data = self.read(len(s), echo)
    while not data.endswith(s):
        data += self.read(1, echo)
    return data
def readlines(self, n, echo=None):
    """Read *n* lines from channel.

    Args:
        n(int): The number of lines to read.
        echo(bool): Whether to write the read data to stdout.

    Returns:
        list of bytes: *n* lines which include new line characters.

    Raises:
        EOFError: If the channel was closed before *n* lines were read.
    """
    # NOTE(review): `self.until` is presumably an alias of `read_until`
    # defined elsewhere in this class -- confirm it exists.
    return [
        self.until(b'\n', echo)
        for _ in range(n)
    ]
def write(self, data, echo=None):
    """Write data to channel.

    Args:
        data(bytes): The data to write to the channel.
        echo(bool): Whether to echo the written data to stdout.

    Raises:
        EOFError: If the channel was closed before all data was sent.
    """
    # Fall back to the instance-wide echo setting when not overridden.
    if echo is None:
        echo = self.echo
    if echo:
        sys.stdout.write(data.decode('latin1'))
        sys.stdout.flush()
    self.channel.write(data)
def writelines(self, lines, sep=b'\n', echo=None):
    """Write a list of byte sequences to the channel and terminate them
    with a separator (line feed by default).

    Args:
        lines(list of bytes): The lines to send.
        sep(bytes): The separator to use after each line.
        echo(bool): Whether to echo the written data to stdout.

    Raises:
        EOFError: If the channel was closed before all data was sent.
    """
    # Appending an empty element makes join() place a separator after the
    # final line as well.
    payload = sep.join(lines + [b''])
    self.write(payload, echo)
def writeline(self, line=b'', sep=b'\n', echo=None):
    """Write a single byte sequence to the channel, terminated with the
    separator (line feed by default).

    Args:
        line(bytes): The line to send.
        sep(bytes): The separator to append after the line.
        echo(bool): Whether to echo the written data to stdout.

    Raises:
        EOFError: If the channel was closed before all data was sent.
    """
    # Delegate to writelines() with a single-element list.
    self.writelines([line], sep, echo)
def connect_tcp(cls, host, port, echo=False):
    """Set up a :class:`TCPClientSocketChannel` and create a :class:`Flow`
    instance for it.

    Args:
        host(str): The hostname or IP address to connect to.
        port(int): The port number to connect to.
        echo(bool): Whether to echo read/written data to stdout by default.

    Returns:
        Flow: A Flow instance wrapping the TCP channel.
    """
    channel = TCPClientSocketChannel(host, port)
    return cls(channel, echo=echo)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.