repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
CmsAppIconList.init_with_context
|
python
|
def init_with_context(self, context):
    """
    Initializes the icon list, merging every CMS app into one group.

    After the parent class has populated ``self.children``, all apps
    recognized by ``is_cms_app()`` are collapsed into a single
    "Modules" group that is placed before the remaining apps.
    """
    super(CmsAppIconList, self).init_with_context(context)
    apps = self.children

    cms_apps = [app for app in apps if is_cms_app(app['name'])]
    other_apps = [app for app in apps if app not in cms_apps]

    if cms_apps:
        # Collect the models of all CMS apps into one combined group.
        merged_models = []
        for app in cms_apps:
            merged_models += app['models']
        sort_cms_models(merged_models)

        cms_group = {'name': "Modules", 'title': "Modules", 'url': "", 'models': merged_models}
        # The combined CMS group comes first; everything else follows.
        self.children = [cms_group] + other_apps
|
Initializes the icon list.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L206-L226
|
[
"def sort_cms_models(cms_models):\n \"\"\"\n Sort a set of CMS-related models in a custom (predefined) order.\n \"\"\"\n cms_models.sort(key=lambda model: (\n get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,\n model['app_name'],\n model['title']\n ))\n",
"def init_with_context(self, context):\n \"\"\"\n Initializes the icon list.\n \"\"\"\n super(AppIconList, self).init_with_context(context)\n apps = self.children\n\n # Standard model only has a title, change_url and add_url.\n # Restore the app_name and name, so icons can be matched.\n for app in apps:\n app_name = self._get_app_name(app)\n app['name'] = app_name\n\n for model in app['models']:\n try:\n model_name = self._get_model_name(model)\n model['name'] = model_name\n model['icon'] = self.get_icon_for_model(app_name, model_name) or appsettings.FLUENT_DASHBOARD_DEFAULT_ICON\n except ValueError:\n model['icon'] = appsettings.FLUENT_DASHBOARD_DEFAULT_ICON\n\n # Automatically add STATIC_URL before relative icon paths.\n model['icon'] = self.get_icon_url(model['icon'])\n model['app_name'] = app_name\n"
] |
class CmsAppIconList(AppIconList):
    """
    A variation of the :class:`AppIconList` class
    with a strong bias towards sorting CMS apps on top.

    .. image:: /images/cmsappiconlist.png
       :width: 471px
       :height: 124px
       :alt: CmsAppIconList module for django-fluent-dashboard
    """
|
django-fluent/django-fluent-dashboard
|
fluent_dashboard/modules.py
|
CacheStatusGroup.init_with_context
|
python
|
def init_with_context(self, context):
    """
    Initializes the status list.

    Only populates ``self.children`` when the optional ``dashboardmods``
    package is listed in ``INSTALLED_APPS``; otherwise the group stays
    empty.
    """
    super(CacheStatusGroup, self).init_with_context(context)

    if 'dashboardmods' not in settings.INSTALLED_APPS:
        return

    import dashboardmods
    memcache_mods = dashboardmods.get_memcache_dash_modules()

    varnish_mods = []
    try:
        varnish_mods = dashboardmods.get_varnish_dash_modules()
    except (socket.error, KeyError) as e:
        # dashboardmods 2.2 throws KeyError for 'cache_misses' when the Varnish cache is empty.
        # Socket errors are also ignored, to work similar to the memcache stats.
        logger.exception("Unable to request Varnish stats: {0}".format(str(e)))
    except ImportError:
        pass

    self.children = memcache_mods + varnish_mods
|
Initializes the status list.
|
train
|
https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L250-L270
| null |
class CacheStatusGroup(modules.Group):
    """
    Display status modules for Varnish and Memcache, in a :class:`~admin_tools.modules.Group` module.

    This module is only displayed when the :ref:`dashboardmods` package
    is installed, added to the ``INSTALLED_APPS``, and the caches are configured and reachable.
    For more information, see the :ref:`optional dependencies <cachestatus>` page.

    .. image:: /images/cachestatusgroup.png
       :width: 471px
       :height: 198px
       :alt: CacheStatusGroup module for django-fluent-dashboard
    """
    #: The default title
    title = _("System status")
    #: The default display mode, can be "tabs", "stacked" or "accordion"
    display = "stacked"
    #: Hide by default
    enabled = False
|
goodmami/penman
|
penman.py
|
alphanum_order
|
python
|
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the sorting
    is alphabetic.
    """
    def _relation_key(triple):
        # Split the relation on digit runs; captured groups keep the
        # digits, which are compared numerically.
        parts = re.split(r'([0-9]+)', triple.relation or '')
        return [int(part) if part.isdigit() else part for part in parts]

    return sorted(triples, key=_relation_key)
|
Sort a list of triples by relation name.
Embedded integers are sorted numerically, but otherwise the sorting
is alphabetic.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L95-L108
| null |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
    """
    Return *triples* unchanged, preserving their original order.

    Used as the default ``relation_sort`` function of the codecs.
    """
    return triples
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    Relies on False < True, so the sort is stable within each group.
    """
    return sorted(triples, key=lambda triple: triple.inverted)
class PENMANCodec(object):
    """
    A parameterized encoder/decoder for graphs in PENMAN notation.
    """
    # Relation name used for node-type ("instance") triples.
    TYPE_REL = 'instance'
    # Variable/relation pair used to mark the top node in triple
    # conjunctions; TOP_VAR is None by default (no explicit top triple).
    TOP_VAR = None
    TOP_REL = 'top'
    # Lexer patterns for the PENMAN and triple-conjunction syntaxes.
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
    NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
    RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
    INT_RE = re.compile(r'[+-]?\d+')
    FLOAT_RE = re.compile(
        r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
    )
    ATOM_RE = re.compile(r'([^\s()\/,]+)')
    STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
    VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
    NODETYPE_RE = VAR_RE  # default; allow strings, numbers, and symbols
    COMMA_RE = re.compile(r'\s*,\s*')
    SPACING_RE = re.compile(r'\s*')

    def __init__(self, indent=True, relation_sort=original_order):
        """
        Initialize a new codec.

        Args:
            indent: if True, adaptively indent; if False or None, don't
                indent; if a non-negative integer, indent that many
                spaces per nesting level
            relation_sort: when encoding, sort the relations on each
                node according to this function; by default, the
                original order is maintained
        """
        self.indent = indent
        self.relation_sort = relation_sort

    def decode(self, s, triples=False):
        """
        Deserialize PENMAN-notation string *s* into its Graph object.

        Args:
            s: a string containing a single PENMAN-serialized graph
            triples: if True, treat *s* as a conjunction of logical triples
        Returns:
            the Graph object described by *s*
        Example:
            >>> codec = PENMANCodec()
            >>> codec.decode('(b / bark :ARG1 (d / dog))')
            <Graph object (top=b) at ...>
            >>> codec.decode(
            ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
            ...     triples=True
            ... )
            <Graph object (top=b) at ...>
        """
        try:
            if triples:
                span, data = self._decode_triple_conjunction(s)
            else:
                span, data = self._decode_penman_node(s)
        except IndexError:
            # the parsers index into *s* directly; running off the end
            # means the input was cut short
            raise DecodeError(
                'Unexpected end of string.', string=s, pos=len(s)
            )
        top, nodes, edges = data
        return self.triples_to_graph(nodes + edges, top=top)

    def iterdecode(self, s, triples=False):
        """
        Deserialize PENMAN-notation string *s* into its Graph objects.

        Args:
            s: a string containing zero or more PENMAN-serialized graphs
            triples: if True, treat *s* as a conjunction of logical triples
        Yields:
            valid Graph objects described by *s*
        Example:
            >>> codec = PENMANCodec()
            >>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
            [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
            >>> list(codec.iterdecode(
            ...     'instance(h, hello)\n'
            ...     'instance(g, goodbye)'
            ... ))
            [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
        """
        pos, strlen = 0, len(s)
        while pos < strlen:
            if s[pos] == '#':
                # skip '#' comment lines up to the next newline
                while pos < strlen and s[pos] != '\n':
                    pos += 1
            elif triples or s[pos] == '(':
                try:
                    if triples:
                        span, data = self._decode_triple_conjunction(
                            s, pos=pos
                        )
                    else:
                        span, data = self._decode_penman_node(s, pos=pos)
                except (IndexError, DecodeError):
                    # don't re-raise below for more robust parsing, but
                    # for now, raising helps with debugging bad input
                    raise
                    pos += 1  # NOTE: unreachable while the raise above stands
                else:
                    top, nodes, edges = data
                    yield self.triples_to_graph(nodes + edges, top=top)
                    pos = span[1]
            else:
                pos += 1

    def encode(self, g, top=None, triples=False):
        """
        Serialize the graph *g* from *top* to PENMAN notation.

        Args:
            g: the Graph object
            top: the node identifier for the top of the serialized
                graph; if unset, the original top of *g* is used
            triples: if True, serialize as a conjunction of logical triples
        Returns:
            the PENMAN-serialized string of the Graph *g*
        Raises:
            EncodeError: if *g* has no triples
        Example:
            >>> codec = PENMANCodec()
            >>> codec.encode(Graph([('h', 'instance', 'hi')]))
            (h / hi)
            >>> codec.encode(Graph([('h', 'instance', 'hi')]),
            ...              triples=True)
            instance(h, hi)
        """
        if len(g.triples()) == 0:
            raise EncodeError('Cannot encode empty graph.')
        if triples:
            return self._encode_triple_conjunction(g, top=top)
        else:
            return self._encode_penman(g, top=top)

    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        return relation and relation.endswith('-of')

    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.
        """
        if self.is_relation_inverted(relation):
            # strip the '-of' suffix; '' becomes None
            return relation[:-3] or None
        else:
            return (relation or '') + '-of'

    def handle_triple(self, lhs, relation, rhs):
        """
        Process triples before they are added to the graph.

        Note that *lhs* and *rhs* are as they originally appeared, and
        may be inverted. Inversions are detected by
        is_relation_inverted() and de-inverted by invert_relation().

        By default, this function:
         * removes initial colons on relations
         * de-inverts all inverted relations
         * sets empty relations to `None`
         * casts numeric string sources and targets to their numeric
           types (e.g. float, int)

        Args:
            lhs: the left hand side of an observed triple
            relation: the triple relation (possibly inverted)
            rhs: the right hand side of an observed triple
        Returns:
            The processed (source, relation, target) triple. By default,
            it is returned as a Triple object.
        """
        relation = relation.replace(':', '', 1)  # remove leading :
        if self.is_relation_inverted(relation):  # deinvert
            source, target, inverted = rhs, lhs, True
            relation = self.invert_relation(relation)
        else:
            source, target, inverted = lhs, rhs, False
        source = _default_cast(source)
        target = _default_cast(target)
        if relation == '':  # set empty relations to None
            relation = None
        return Triple(source, relation, target, inverted)

    def triples_to_graph(self, triples, top=None):
        """
        Create a Graph from *triples* considering codec configuration.

        The Graph class does not know about information in the codec,
        so if Graph instantiation depends on special `TYPE_REL` or
        `TOP_VAR` values, use this function instead of instantiating
        a Graph object directly. This is also where edge
        normalization (de-inversion) and value type conversion occur
        (via handle_triple()).

        Args:
            triples: an iterable of (lhs, relation, rhs) triples
            top: node identifier of the top node
        Returns:
            a Graph object
        """
        # fall back to the source of the first triple as the top
        inferred_top = triples[0][0] if triples else None
        ts = []
        for triple in triples:
            if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
                # a special TOP triple overrides the inferred top
                inferred_top = triple[2]
            else:
                ts.append(self.handle_triple(*triple))
        # run *top* through handle_triple() so it gets the same casting
        top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
        return Graph(ts, top=top or inferred_top)

    def _decode_triple_conjunction(self, s, pos=0):
        """
        Parse one ^-joined triple conjunction in *s* starting at *pos*.

        Returns ((start, end), (top, nodes, edges)) where nodes holds
        TYPE_REL triples and edges holds the rest.
        """
        top, nodes, edges = None, [], []
        start = None
        while True:
            m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
            if start is None:
                start = m.start(1)
            pos, rel = m.end(0), m.group(1)
            m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
            pos = m.end(0)
            m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
            pos, var = m.end(0), m.group(1).strip()
            m = _regex(self.COMMA_RE, s, pos, '","')
            pos = m.end(0)
            if rel == self.TYPE_REL:
                m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
            else:
                if s[pos] == '"':
                    m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                else:
                    m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
            pos, tgt = m.end(0), m.group(1)
            if var == self.TOP_VAR and rel == self.TOP_REL:
                top = tgt
            elif rel == self.TYPE_REL:
                nodes.append((var, rel, tgt))
            else:
                edges.append((var, rel, tgt))
            m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
            pos = m.end(1)
            # another triple follows only after a '^' connective
            if m.end(0) < len(s) and s[m.end(0)] == '^':
                pos = m.end(0) + 1
            else:
                break
        if top is None and nodes:
            # no explicit top triple; use the first typed node
            top = nodes[0][0]
        return (start, pos), (top, nodes, edges)

    def _decode_penman_node(self, s, pos=0):
        """
        Recursively parse one parenthesized PENMAN node at *pos*.

        Returns ((start, end), (var, nodes, edges)); nested nodes
        contribute their nodes/edges to the returned lists.
        """
        nodes, edges = [], []
        strlen = len(s)
        m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
        start, pos = m.start(1), m.end(0)
        m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
        pos, var = m.end(0), m.group(1).strip()
        nodetype = None
        while pos < strlen and s[pos] != ')':
            # node type
            if s[pos] == '/':
                pos = self.SPACING_RE.match(s, pos=pos+1).end()
                m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
                pos, nodetype = m.end(0), m.group(1)
            # relation
            elif s[pos] == ':':
                m = _regex(self.RELATION_RE, s, pos, 'a relation')
                pos, rel = m.end(0), m.group(1)
                # node value
                if s[pos] == '(':
                    span, data = self._decode_penman_node(s, pos=pos)
                    pos = span[1]
                    subtop, subnodes, subedges = data
                    nodes.extend(subnodes)
                    edges.append((var, rel, subtop))
                    edges.extend(subedges)
                # string or other atom value
                else:
                    if s[pos] == '"':
                        m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                        pos, value = m.end(0), m.group(1)
                    else:
                        m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
                        pos, value = m.end(0), m.group(1)
                    edges.append((var, rel, value))
            elif s[pos].isspace():
                pos += 1
            # error
            else:
                raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
        m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
        pos = m.end(1)
        # the node's own type triple goes first
        nodes = [(var, self.TYPE_REL, nodetype)] + nodes
        return (start, pos), (var, nodes, edges)

    def _encode_penman(self, g, top=None):
        """
        Walk graph g and find a spanning dag, then serialize the result.

        First, depth-first traversal of preferred orientations (whether
        true or inverted) to create graph p.

        If any triples remain, select the first remaining triple whose
        source in the dispreferred orientation exists in p, where
        'first' is determined by the order of inserted nodes (i.e. a
        topological sort). Add this triple, then repeat the depth-first
        traversal of preferred orientations from its target. Repeat
        until no triples remain, or raise an error if there are no
        candidates in the dispreferred orientation (which likely means
        the graph is disconnected).
        """
        if top is None:
            top = g.top
        remaining = set(g.triples())
        variables = g.variables()
        store = defaultdict(lambda: ([], []))  # (preferred, dispreferred)
        for t in g.triples():
            if t.inverted:
                store[t.target][0].append(t)
                store[t.source][1].append(Triple(*t, inverted=False))
            else:
                store[t.source][0].append(t)
                store[t.target][1].append(Triple(*t, inverted=True))
        p = defaultdict(list)
        topolist = [top]

        def _update(t):
            # record *t* under its (possibly inverted) source and return
            # the target if it should be explored further
            src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
            p[src].append(t)
            remaining.remove(t)
            if tgt in variables and t.relation != self.TYPE_REL:
                topolist.append(tgt)
                return tgt
            return None

        def _explore_preferred(src):
            # depth-first traversal over preferred-orientation triples
            ts = store.get(src, ([], []))[0]
            for t in ts:
                if t in remaining:
                    tgt = _update(t)
                    if tgt is not None:
                        _explore_preferred(tgt)
            ts[:] = []  # clear explored list

        _explore_preferred(top)
        while remaining:
            flip_candidates = [store.get(v, ([], []))[1] for v in topolist]
            for fc in flip_candidates:
                fc[:] = [c for c in fc if c in remaining]  # clear superfluous
            if not any(len(fc) > 0 for fc in flip_candidates):
                raise EncodeError('Invalid graph; possibly disconnected.')
            c = next(c for fc in flip_candidates for c in fc)
            tgt = _update(c)
            if tgt is not None:
                _explore_preferred(tgt)
        return self._layout(p, top, 0, set())

    def _layout(self, g, src, offset, seen):
        """
        Render the spanning dag *g* from *src* as a PENMAN string.

        *offset* is the current column for continuation lines; *seen*
        prevents re-expanding already-serialized nodes.
        """
        indent = self.indent
        if src not in g or len(g.get(src, [])) == 0 or src in seen:
            return src
        seen.add(src)
        branches = []
        outedges = self.relation_sort(g[src])
        head = '({}'.format(src)
        if indent is True:
            offset += len(head) + 1  # + 1 for space after src (added later)
        elif indent is not None and indent is not False:
            offset += indent
        for t in outedges:
            if t.relation == self.TYPE_REL:
                if t.target is not None:
                    # node types always come first
                    branches = ['/ {}'.format(t.target)] + branches
            else:
                if t.inverted:
                    tgt = t.source
                    rel = self.invert_relation(t.relation)
                else:
                    tgt = t.target
                    rel = t.relation or ''
                inner_offset = (len(rel) + 2) if indent is True else 0
                branch = self._layout(g, tgt, offset + inner_offset, seen)
                branches.append(':{} {}'.format(rel, branch))
        if branches:
            head += ' '
        delim = ' ' if (indent is None or indent is False) else '\n'
        tail = (delim + (' ' * offset)).join(branches) + ')'
        return head + tail

    def _encode_triple_conjunction(self, g, top=None):
        """
        Serialize *g* as a '^'-joined conjunction of relation(src, tgt)
        triples, optionally prefixed with an explicit top triple.
        """
        if top is None:
            top = g.top
        if self.TOP_VAR is not None and top is not None:
            top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
        else:
            top_triple = []
        return ' ^\n'.join(
            map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
        )
class AMRCodec(PENMANCodec):
    """
    An AMR codec for graphs in PENMAN notation.
    """
    TYPE_REL = 'instance'
    TOP_VAR = None
    TOP_REL = 'top'
    # vars: [a-z]+\d* ; first relation must be node type
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
    NODETYPE_RE = PENMANCodec.ATOM_RE
    VAR_RE = re.compile(r'([a-z]+\d*)')
    # only non-anonymous relations
    RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
    # fixed (de)inversion tables for relations whose inverse is
    # irregular in AMR
    _inversions = {
        TYPE_REL: None,  # don't allow inverted types
        'domain': 'mod',
        'consist-of': 'consist-of-of',
        'prep-on-behalf-of': 'prep-on-behalf-of-of',
        'prep-out-of': 'prep-out-of-of',
    }
    _deinversions = {
        'mod': 'domain',
    }

    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        return (
            relation in self._deinversions or
            (relation.endswith('-of') and relation not in self._inversions)
        )

    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.

        Raises:
            PenmanError: if *relation* maps to None (inversion is
                explicitly disallowed, e.g. for node types)
        """
        if self.is_relation_inverted(relation):
            rel = self._deinversions.get(relation, relation[:-3])
        else:
            rel = self._inversions.get(relation, relation + '-of')
        if rel is None:
            raise PenmanError(
                'Cannot (de)invert {}; not allowed'.format(relation)
            )
        return rel
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
    """Container for Graph edges and node attributes."""
    def __new__(cls, source, relation, target, inverted=None):
        t = super(Triple, cls).__new__(
            cls, source, relation, target
        )
        # *inverted* is stored as a plain attribute (not a tuple field),
        # so tuple equality and hashing deliberately ignore it
        t.inverted = inverted
        return t
class Graph(object):
    """
    A basic class for modeling a rooted, directed acyclic graph.

    A Graph is defined by a list of triples, which can be divided into
    two parts: a list of graph edges where both the source and target
    are node identifiers, and a list of node attributes where only the
    source is a node identifier and the target is a constant. These
    lists can be obtained via the Graph.triples(), Graph.edges(), and
    Graph.attributes() methods.
    """
    def __init__(self, data=None, top=None):
        """
        Create a Graph from an iterable of triples.

        Args:
            data: an iterable of triples (Triple objects or 3-tuples)
            top: the node identifier of the top node; if unspecified,
                the source of the first triple is used
        Example:
            >>> Graph([
            ...     ('b', 'instance', 'bark'),
            ...     ('d', 'instance', 'dog'),
            ...     ('b', 'ARG1', 'd')
            ... ])
        """
        self._triples = []
        self._top = None
        if data is None:
            data = []
        else:
            data = list(data)  # make list (e.g., if its a generator)
        if data:
            self._triples.extend(
                Triple(*t, inverted=getattr(t, 'inverted', None))
                for t in data
            )
            # implicit top: source of first triple
            if top is None:
                top = data[0][0]
            self.top = top

    def __repr__(self):
        return '<{} object (top={}) at {}>'.format(
            self.__class__.__name__,
            self.top,
            id(self)
        )

    def __str__(self):
        return PENMANCodec().encode(self)  # just use the default encoder

    @property
    def top(self):
        """
        The top variable.
        """
        return self._top

    @top.setter
    def top(self, top):
        # a valid top must be one of the graph's variables
        if top not in self.variables():
            raise ValueError('top must be a valid node')
        self._top = top  # check if top is valid variable?

    def variables(self):
        """
        Return the list of variables (nonterminal node identifiers).
        """
        return set(v for v, _, _ in self._triples)

    def triples(self, source=None, relation=None, target=None):
        """
        Return triples filtered by their *source*, *relation*, or *target*.
        """
        triplematch = lambda t: (
            (source is None or source == t.source) and
            (relation is None or relation == t.relation) and
            (target is None or target == t.target)
        )
        return list(filter(triplematch, self._triples))

    def edges(self, source=None, relation=None, target=None):
        """
        Return edges filtered by their *source*, *relation*, or *target*.

        Edges don't include terminal triples (node types or attributes).
        """
        edgematch = lambda e: (
            (source is None or source == e.source) and
            (relation is None or relation == e.relation) and
            (target is None or target == e.target)
        )
        variables = self.variables()
        # an edge's target is itself a variable
        edges = [t for t in self._triples if t.target in variables]
        return list(filter(edgematch, edges))

    def attributes(self, source=None, relation=None, target=None):
        """
        Return attributes filtered by their *source*, *relation*, or *target*.

        Attributes don't include triples where the target is a nonterminal.
        """
        attrmatch = lambda a: (
            (source is None or source == a.source) and
            (relation is None or relation == a.relation) and
            (target is None or target == a.target)
        )
        variables = self.variables()
        attrs = [t for t in self.triples() if t.target not in variables]
        return list(filter(attrmatch, attrs))

    def reentrancies(self):
        """
        Return a mapping of variables to their re-entrancy count.

        A re-entrancy is when more than one edge selects a node as its
        target. These graphs are rooted, so the top node always has an
        implicit entrancy. Only nodes with re-entrancies are reported,
        and the count is only for the entrant edges beyond the first.
        Also note that these counts are for the interpreted graph, not
        for the linearized form, so inverted edges are always
        re-entrant.
        """
        entrancies = defaultdict(int)
        entrancies[self.top] += 1  # implicit entrancy to top
        for t in self.edges():
            entrancies[t.target] += 1
        return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
def _regex(x, s, pos, msg):
    """
    Match compiled pattern *x* in *s* at *pos*, or raise DecodeError.

    *msg* describes what was expected and is embedded in the error.
    """
    match = x.match(s, pos=pos)
    if match is not None:
        return match
    raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
def _default_cast(x):
    """
    Cast string *x* to int or float when it looks numeric.

    Quoted strings (starting with '"') and non-strings pass through
    unchanged.
    """
    if not isinstance(x, basestring):
        return x
    if x.startswith('"'):
        return x  # quoted string; quotes are kept as-is
    if re.match(r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
        return float(x)
    if re.match(r'-?\d+$', x):
        return int(x)
    return x
class PenmanError(Exception):
    """Base class for errors in the Penman package."""
class EncodeError(PenmanError):
    """Raised when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
    """
    Raised when decoding PENMAN-notation fails.

    Carries the offending *string* and the *pos* (an index or slice)
    where decoding failed, both reported by __str__.
    """

    def __init__(self, *args, **kwargs):
        # Python 2 doesn't allow keyword-only parameters like
        #   (*args, key=val, **kwargs)
        # so extract them manually; pop() with a default replaces the
        # original check/del dance with identical behavior.
        string = kwargs.pop('string', None)
        pos = kwargs.pop('pos', None)
        super(DecodeError, self).__init__(*args, **kwargs)
        self.string = string
        self.pos = pos

    def __str__(self):
        if isinstance(self.pos, slice):
            loc = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
        else:
            loc = ' at position {}'.format(self.pos)
        return Exception.__str__(self) + loc
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object

    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    return cls(**kwargs).decode(s)
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    return cls(**kwargs).encode(g, top=top)
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # Forward *triples* to the decoder; previously the parameter was
    # accepted but silently ignored (sibling loads() forwards it).
    if hasattr(source, 'read'):
        return list(decode(source.read(), triples=triples))
    else:
        with open(source) as fh:
            return list(decode(fh.read(), triples=triples))
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    return list(cls(**kwargs).iterdecode(string, triples=triples))
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    text = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if hasattr(file, 'write'):
        print(text, file=file)
        return
    # *file* is a path; open it for writing
    with open(file, 'w') as fh:
        print(text, file=fh)
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the string of serialized graphs, separated by blank lines
    """
    codec = cls(**kwargs)
    return '\n\n'.join(codec.encode(g, triples=triples) for g in graphs)
def _main():
    """
    Command-line entry point: read graphs from the input, re-serialize
    them with the selected codec/options, and write them out.
    """
    import sys
    from docopt import docopt
    args = docopt(USAGE, version='Penman {}'.format(__version__))
    # fall back to stdin/stdout when no file options are given
    infile = args['--input'] or sys.stdin
    outfile = args['--output'] or sys.stdout
    codec = AMRCodec if args['--amr'] else PENMANCodec
    indent = True
    if args['--indent']:
        if args['--indent'].lower() in ("no", "none", "false"):
            indent = False
        else:
            try:
                indent = int(args['--indent'])
                if indent < 0:
                    # negative indents are rejected via the except below
                    raise ValueError
            except ValueError:
                sys.exit('error: --indent value must be "no" or a '
                         ' positive integer')
    data = load(infile, cls=codec)
    dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)


if __name__ == '__main__':
    _main()
|
goodmami/penman
|
penman.py
|
decode
|
python
|
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object.

    Instantiates *cls* with *kwargs* and delegates to its decode().
    """
    return cls(**kwargs).decode(s)
|
Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the Graph object described by *s*
Example:
>>> decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L793-L809
|
[
"def decode(self, s, triples=False):\n \"\"\"\n Deserialize PENMAN-notation string *s* into its Graph object.\n\n Args:\n s: a string containing a single PENMAN-serialized graph\n triples: if True, treat *s* as a conjunction of logical triples\n Returns:\n the Graph object described by *s*\n Example:\n\n >>> codec = PENMANCodec()\n >>> codec.decode('(b / bark :ARG1 (d / dog))')\n <Graph object (top=b) at ...>\n >>> codec.decode(\n ... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',\n ... triples=True\n ... )\n <Graph object (top=b) at ...>\n \"\"\"\n try:\n if triples:\n span, data = self._decode_triple_conjunction(s)\n else:\n span, data = self._decode_penman_node(s)\n except IndexError:\n raise DecodeError(\n 'Unexpected end of string.', string=s, pos=len(s)\n )\n top, nodes, edges = data\n return self.triples_to_graph(nodes + edges, top=top)\n"
] |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
    """Return *triples* unchanged, preserving their original order."""
    return triples
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    Triples whose ``inverted`` attribute is falsy (``False`` or the
    Triple default of ``None``) sort before inverted ones.  The sort is
    stable, so relative order within each group is preserved.

    Args:
        triples: an iterable of Triple objects
    Returns:
        a list of the triples with non-inverted triples first
    """
    # bool() guards against a TypeError: Triple.inverted defaults to
    # None, and Python 3 cannot order None against True/False.
    return sorted(triples, key=lambda t: bool(t.inverted))
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the sorting
    is alphabetic.
    """
    def _relation_key(triple):
        # Split the relation into digit and non-digit runs so that,
        # e.g., ARG2 sorts before ARG10.
        chunks = re.split(r'([0-9]+)', triple.relation or '')
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in chunks]
    return sorted(triples, key=_relation_key)
class PENMANCodec(object):
    """
    A parameterized encoder/decoder for graphs in PENMAN notation.
    """
    # Relation name used for concept (node type) triples.
    TYPE_REL = 'instance'
    # Special source variable and relation marking the graph's top in
    # triple conjunctions; a TOP_VAR of None means no top triple is
    # written or expected.
    TOP_VAR = None
    TOP_REL = 'top'
    # Token patterns for the PENMAN surface syntax.
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
    NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
    RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
    INT_RE = re.compile(r'[+-]?\d+')
    FLOAT_RE = re.compile(
        r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
    )
    ATOM_RE = re.compile(r'([^\s()\/,]+)')
    STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
    VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
    NODETYPE_RE = VAR_RE  # default; allow strings, numbers, and symbols
    COMMA_RE = re.compile(r'\s*,\s*')
    SPACING_RE = re.compile(r'\s*')

    def __init__(self, indent=True, relation_sort=original_order):
        """
        Initialize a new codec.

        Args:
            indent: if True, adaptively indent; if False or None, don't
                indent; if a non-negative integer, indent that many
                spaces per nesting level
            relation_sort: when encoding, sort the relations on each
                node according to this function; by default, the
                original order is maintained
        """
        self.indent = indent
        self.relation_sort = relation_sort

    def decode(self, s, triples=False):
        """
        Deserialize PENMAN-notation string *s* into its Graph object.

        Args:
            s: a string containing a single PENMAN-serialized graph
            triples: if True, treat *s* as a conjunction of logical triples
        Returns:
            the Graph object described by *s*
        Example:
            >>> codec = PENMANCodec()
            >>> codec.decode('(b / bark :ARG1 (d / dog))')
            <Graph object (top=b) at ...>
            >>> codec.decode(
            ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
            ...     triples=True
            ... )
            <Graph object (top=b) at ...>
        """
        try:
            if triples:
                span, data = self._decode_triple_conjunction(s)
            else:
                span, data = self._decode_penman_node(s)
        except IndexError:
            # scanning ran off the end of the input
            raise DecodeError(
                'Unexpected end of string.', string=s, pos=len(s)
            )
        top, nodes, edges = data
        return self.triples_to_graph(nodes + edges, top=top)

    def iterdecode(self, s, triples=False):
        """
        Deserialize PENMAN-notation string *s* into its Graph objects.

        Args:
            s: a string containing zero or more PENMAN-serialized graphs
            triples: if True, treat *s* as a conjunction of logical triples
        Yields:
            valid Graph objects described by *s*
        Example:
            >>> codec = PENMANCodec()
            >>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
            [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
            >>> list(codec.iterdecode(
            ...     'instance(h, hello)\n'
            ...     'instance(g, goodbye)'
            ... ))
            [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
        """
        pos, strlen = 0, len(s)
        while pos < strlen:
            if s[pos] == '#':
                # skip comment lines up to the next newline
                while pos < strlen and s[pos] != '\n':
                    pos += 1
            elif triples or s[pos] == '(':
                try:
                    if triples:
                        span, data = self._decode_triple_conjunction(
                            s, pos=pos
                        )
                    else:
                        span, data = self._decode_penman_node(s, pos=pos)
                except (IndexError, DecodeError):
                    # don't re-raise below for more robust parsing, but
                    # for now, raising helps with debugging bad input
                    raise
                    pos += 1  # unreachable until the raise above is removed
                else:
                    top, nodes, edges = data
                    yield self.triples_to_graph(nodes + edges, top=top)
                    pos = span[1]
            else:
                pos += 1

    def encode(self, g, top=None, triples=False):
        """
        Serialize the graph *g* from *top* to PENMAN notation.

        Args:
            g: the Graph object
            top: the node identifier for the top of the serialized
                graph; if unset, the original top of *g* is used
            triples: if True, serialize as a conjunction of logical triples
        Returns:
            the PENMAN-serialized string of the Graph *g*
        Example:
            >>> codec = PENMANCodec()
            >>> codec.encode(Graph([('h', 'instance', 'hi')]))
            (h / hi)
            >>> codec.encode(Graph([('h', 'instance', 'hi')]),
            ...              triples=True)
            instance(h, hi)
        """
        if len(g.triples()) == 0:
            raise EncodeError('Cannot encode empty graph.')
        if triples:
            return self._encode_triple_conjunction(g, top=top)
        else:
            return self._encode_penman(g, top=top)

    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.

        Note: may also return a falsy non-bool (None or '') when
        *relation* itself is falsy, due to short-circuiting.
        """
        return relation and relation.endswith('-of')

    def invert_relation(self, relation):
        """
        Invert or deinvert *relation* by removing or adding '-of'.
        """
        if self.is_relation_inverted(relation):
            return relation[:-3] or None
        else:
            return (relation or '') + '-of'

    def handle_triple(self, lhs, relation, rhs):
        """
        Process triples before they are added to the graph.

        Note that *lhs* and *rhs* are as they originally appeared, and
        may be inverted. Inversions are detected by
        is_relation_inverted() and de-inverted by invert_relation().

        By default, this function:
            * removes initial colons on relations
            * de-inverts all inverted relations
            * sets empty relations to `None`
            * casts numeric string sources and targets to their numeric
              types (e.g. float, int)

        Args:
            lhs: the left hand side of an observed triple
            relation: the triple relation (possibly inverted)
            rhs: the right hand side of an observed triple
        Returns:
            The processed (source, relation, target) triple. By default,
            it is returned as a Triple object.
        """
        relation = relation.replace(':', '', 1)  # remove leading :
        if self.is_relation_inverted(relation):  # deinvert
            source, target, inverted = rhs, lhs, True
            relation = self.invert_relation(relation)
        else:
            source, target, inverted = lhs, rhs, False
        source = _default_cast(source)
        target = _default_cast(target)
        if relation == '':  # set empty relations to None
            relation = None
        return Triple(source, relation, target, inverted)

    def triples_to_graph(self, triples, top=None):
        """
        Create a Graph from *triples* considering codec configuration.

        The Graph class does not know about information in the codec,
        so if Graph instantiation depends on special `TYPE_REL` or
        `TOP_VAR` values, use this function instead of instantiating
        a Graph object directly. This is also where edge
        normalization (de-inversion) and value type conversion occur
        (via handle_triple()).

        Args:
            triples: an iterable of (lhs, relation, rhs) triples
            top: node identifier of the top node
        Returns:
            a Graph object
        """
        # fallback top: the source of the first triple
        inferred_top = triples[0][0] if triples else None
        ts = []
        for triple in triples:
            if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
                # a special top triple only records which node is top
                inferred_top = triple[2]
            else:
                ts.append(self.handle_triple(*triple))
        # normalize the explicit *top* value as well (e.g. numeric cast)
        top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
        return Graph(ts, top=top or inferred_top)

    def _decode_triple_conjunction(self, s, pos=0):
        # Parse 'rel(lhs, rhs) ^ rel(lhs, rhs) ...' starting at *pos*.
        # Returns ((start, end), (top, nodes, edges)).
        top, nodes, edges = None, [], []
        start = None
        while True:
            m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
            if start is None:
                start = m.start(1)
            pos, rel = m.end(0), m.group(1)
            m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
            pos = m.end(0)
            m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
            pos, var = m.end(0), m.group(1).strip()
            m = _regex(self.COMMA_RE, s, pos, '","')
            pos = m.end(0)
            # the right-hand side: a node type, quoted string, or atom
            if rel == self.TYPE_REL:
                m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
            else:
                if s[pos] == '"':
                    m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                else:
                    m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
            pos, tgt = m.end(0), m.group(1)
            if var == self.TOP_VAR and rel == self.TOP_REL:
                top = tgt
            elif rel == self.TYPE_REL:
                nodes.append((var, rel, tgt))
            else:
                edges.append((var, rel, tgt))
            m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
            pos = m.end(1)
            # '^' joins conjuncts; anything else ends the conjunction
            if m.end(0) < len(s) and s[m.end(0)] == '^':
                pos = m.end(0) + 1
            else:
                break
        if top is None and nodes:
            # default top: the variable of the first typed node
            top = nodes[0][0]
        return (start, pos), (top, nodes, edges)

    def _decode_penman_node(self, s, pos=0):
        # Recursively parse one '(var / type :rel tgt ...)' node at
        # *pos*.  Returns ((start, end), (var, nodes, edges)).
        nodes, edges = [], []
        strlen = len(s)
        m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
        start, pos = m.start(1), m.end(0)
        m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
        pos, var = m.end(0), m.group(1).strip()
        nodetype = None
        while pos < strlen and s[pos] != ')':
            # node type
            if s[pos] == '/':
                pos = self.SPACING_RE.match(s, pos=pos+1).end()
                m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
                pos, nodetype = m.end(0), m.group(1)
            # relation
            elif s[pos] == ':':
                m = _regex(self.RELATION_RE, s, pos, 'a relation')
                pos, rel = m.end(0), m.group(1)
                # node value (recurse into the embedded node)
                if s[pos] == '(':
                    span, data = self._decode_penman_node(s, pos=pos)
                    pos = span[1]
                    subtop, subnodes, subedges = data
                    nodes.extend(subnodes)
                    edges.append((var, rel, subtop))
                    edges.extend(subedges)
                # string or other atom value
                else:
                    if s[pos] == '"':
                        m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                        pos, value = m.end(0), m.group(1)
                    else:
                        m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
                        pos, value = m.end(0), m.group(1)
                    edges.append((var, rel, value))
            elif s[pos].isspace():
                pos += 1
            # error
            else:
                raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
        m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
        pos = m.end(1)
        # this node's type triple goes first so its variable becomes
        # the implicit top when no explicit top is given
        nodes = [(var, self.TYPE_REL, nodetype)] + nodes
        return (start, pos), (var, nodes, edges)

    def _encode_penman(self, g, top=None):
        """
        Walk graph g and find a spanning dag, then serialize the result.

        First, depth-first traversal of preferred orientations (whether
        true or inverted) to create graph p.
        If any triples remain, select the first remaining triple whose
        source in the dispreferred orientation exists in p, where
        'first' is determined by the order of inserted nodes (i.e. a
        topological sort). Add this triple, then repeat the depth-first
        traversal of preferred orientations from its target. Repeat
        until no triples remain, or raise an error if there are no
        candidates in the dispreferred orientation (which likely means
        the graph is disconnected).
        """
        if top is None:
            top = g.top
        remaining = set(g.triples())
        variables = g.variables()
        # per-node lists of candidate triples, keyed by the node that
        # would serialize them
        store = defaultdict(lambda: ([], []))  # (preferred, dispreferred)
        for t in g.triples():
            if t.inverted:
                store[t.target][0].append(t)
                store[t.source][1].append(Triple(*t, inverted=False))
            else:
                store[t.source][0].append(t)
                store[t.target][1].append(Triple(*t, inverted=True))
        p = defaultdict(list)  # the spanning dag being built
        topolist = [top]  # node insertion order (topological)

        def _update(t):
            # move *t* into the dag; return its target if it opens a
            # new node to explore, otherwise None
            src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
            p[src].append(t)
            remaining.remove(t)
            if tgt in variables and t.relation != self.TYPE_REL:
                topolist.append(tgt)
                return tgt
            return None

        def _explore_preferred(src):
            # depth-first traversal over preferred orientations
            ts = store.get(src, ([], []))[0]
            for t in ts:
                if t in remaining:
                    tgt = _update(t)
                    if tgt is not None:
                        _explore_preferred(tgt)
            ts[:] = []  # clear explored list

        _explore_preferred(top)
        while remaining:
            # fall back to a dispreferred (flipped) edge whose source is
            # already in the dag, earliest inserted node first
            flip_candidates = [store.get(v, ([], []))[1] for v in topolist]
            for fc in flip_candidates:
                fc[:] = [c for c in fc if c in remaining]  # clear superfluous
            if not any(len(fc) > 0 for fc in flip_candidates):
                raise EncodeError('Invalid graph; possibly disconnected.')
            c = next(c for fc in flip_candidates for c in fc)
            tgt = _update(c)
            if tgt is not None:
                _explore_preferred(tgt)
        return self._layout(p, top, 0, set())

    def _layout(self, g, src, offset, seen):
        # Recursively render node *src* of the spanning dag *g* at
        # column *offset*; *seen* prevents re-entering a node.
        indent = self.indent
        if src not in g or len(g.get(src, [])) == 0 or src in seen:
            # terminal: a constant, or a variable already printed
            return src
        seen.add(src)
        branches = []
        outedges = self.relation_sort(g[src])
        head = '({}'.format(src)
        if indent is True:
            # adaptive indent: align under the space after the variable
            offset += len(head) + 1  # + 1 for space after src (added later)
        elif indent is not None and indent is not False:
            offset += indent
        for t in outedges:
            if t.relation == self.TYPE_REL:
                if t.target is not None:
                    # node types always come first
                    branches = ['/ {}'.format(t.target)] + branches
            else:
                if t.inverted:
                    tgt = t.source
                    rel = self.invert_relation(t.relation)
                else:
                    tgt = t.target
                    rel = t.relation or ''
                inner_offset = (len(rel) + 2) if indent is True else 0
                branch = self._layout(g, tgt, offset + inner_offset, seen)
                branches.append(':{} {}'.format(rel, branch))
        if branches:
            head += ' '
        delim = ' ' if (indent is None or indent is False) else '\n'
        tail = (delim + (' ' * offset)).join(branches) + ')'
        return head + tail

    def _encode_triple_conjunction(self, g, top=None):
        # Render the graph as 'rel(src, tgt) ^ ...', one conjunct per
        # line, with an optional leading top triple.
        if top is None:
            top = g.top
        if self.TOP_VAR is not None and top is not None:
            top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
        else:
            top_triple = []
        return ' ^\n'.join(
            map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
        )
class AMRCodec(PENMANCodec):
    """
    An AMR codec for graphs in PENMAN notation.
    """
    TYPE_REL = 'instance'
    TOP_VAR = None
    TOP_REL = 'top'
    # vars: [a-z]+\d* ; first relation must be node type
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
    NODETYPE_RE = PENMANCodec.ATOM_RE
    VAR_RE = re.compile(r'([a-z]+\d*)')
    # only non-anonymous relations
    RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
    # AMR has explicit inversion pairs that do not follow the plain
    # '-of' suffix rule; a value of None forbids inversion entirely.
    _inversions = {
        TYPE_REL: None,  # don't allow inverted types
        'domain': 'mod',
        'consist-of': 'consist-of-of',
        'prep-on-behalf-of': 'prep-on-behalf-of-of',
        'prep-out-of': 'prep-out-of-of',
    }
    # reverse of the irregular pairs above
    _deinversions = {
        'mod': 'domain',
    }

    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        return (
            relation in self._deinversions or
            (relation.endswith('-of') and relation not in self._inversions)
        )

    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.

        Raises PenmanError when inversion is explicitly disallowed for
        *relation* (e.g. the type relation).
        """
        if self.is_relation_inverted(relation):
            rel = self._deinversions.get(relation, relation[:-3])
        else:
            rel = self._inversions.get(relation, relation + '-of')
        if rel is None:
            raise PenmanError(
                'Cannot (de)invert {}; not allowed'.format(relation)
            )
        return rel
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
    """
    Container for Graph edges and node attributes.

    Behaves like a (source, relation, target) 3-tuple, with an extra
    non-tuple attribute *inverted* recording the orientation in which
    the triple was observed (True, False, or None when unspecified).
    """

    def __new__(cls, source, relation, target, inverted=None):
        triple = super(Triple, cls).__new__(cls, source, relation, target)
        # carried outside the tuple data, so it does not affect
        # equality or hashing
        triple.inverted = inverted
        return triple
class Graph(object):
    """
    A basic class for modeling a rooted, directed acyclic graph.

    A Graph is defined by a list of triples, which can be divided into
    two parts: a list of graph edges where both the source and target
    are node identifiers, and a list of node attributes where only the
    source is a node identifier and the target is a constant. These
    lists can be obtained via the Graph.triples(), Graph.edges(), and
    Graph.attributes() methods.
    """

    def __init__(self, data=None, top=None):
        """
        Create a Graph from an iterable of triples.

        Args:
            data: an iterable of triples (Triple objects or 3-tuples)
            top: the node identifier of the top node; if unspecified,
                the source of the first triple is used
        Example:
            >>> Graph([
            ...     ('b', 'instance', 'bark'),
            ...     ('d', 'instance', 'dog'),
            ...     ('b', 'ARG1', 'd')
            ... ])
        """
        self._triples = []
        self._top = None
        triple_list = [] if data is None else list(data)
        if triple_list:
            for t in triple_list:
                self._triples.append(
                    Triple(*t, inverted=getattr(t, 'inverted', None))
                )
            if top is None:
                # implicit top: source of the first triple
                top = triple_list[0][0]
        self.top = top

    def __repr__(self):
        return '<{} object (top={}) at {}>'.format(
            self.__class__.__name__, self.top, id(self)
        )

    def __str__(self):
        # just use the default encoder
        return PENMANCodec().encode(self)

    @property
    def top(self):
        """
        The top variable.
        """
        return self._top

    @top.setter
    def top(self, top):
        # the top must name an existing node
        if top not in self.variables():
            raise ValueError('top must be a valid node')
        self._top = top

    def variables(self):
        """
        Return the set of variables (nonterminal node identifiers).
        """
        return set(t.source for t in self._triples)

    def triples(self, source=None, relation=None, target=None):
        """
        Return triples filtered by their *source*, *relation*, or *target*.
        """
        return [
            t for t in self._triples
            if (source is None or source == t.source)
            and (relation is None or relation == t.relation)
            and (target is None or target == t.target)
        ]

    def edges(self, source=None, relation=None, target=None):
        """
        Return edges filtered by their *source*, *relation*, or *target*.

        Edges don't include terminal triples (node types or attributes).
        """
        variables = self.variables()
        return [
            t for t in self._triples
            if t.target in variables
            and (source is None or source == t.source)
            and (relation is None or relation == t.relation)
            and (target is None or target == t.target)
        ]

    def attributes(self, source=None, relation=None, target=None):
        """
        Return attributes filtered by their *source*, *relation*, or *target*.

        Attributes don't include triples where the target is a nonterminal.
        """
        variables = self.variables()
        return [
            t for t in self._triples
            if t.target not in variables
            and (source is None or source == t.source)
            and (relation is None or relation == t.relation)
            and (target is None or target == t.target)
        ]

    def reentrancies(self):
        """
        Return a mapping of variables to their re-entrancy count.

        A re-entrancy is when more than one edge selects a node as its
        target. These graphs are rooted, so the top node always has an
        implicit entrancy. Only nodes with re-entrancies are reported,
        and the count is only for the entrant edges beyond the first.
        Also note that these counts are for the interpreted graph, not
        for the linearized form, so inverted edges are always
        re-entrant.
        """
        counts = defaultdict(int)
        counts[self.top] += 1  # implicit entrancy to the top node
        for edge in self.edges():
            counts[edge.target] += 1
        return dict((v, n - 1) for v, n in counts.items() if n >= 2)
def _regex(x, s, pos, msg):
m = x.match(s, pos=pos)
if m is None:
raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
return m
def _default_cast(x):
    """
    Cast string *x* to a float or int when it looks numeric.

    Quoted constants (strings starting with '"') and non-numeric
    symbols are returned unchanged, as is any non-string input.
    """
    if not isinstance(x, basestring):
        return x
    if x.startswith('"'):
        # quoted constant; keep the quotes in place
        return x
    if re.match(r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
        return float(x)
    if re.match(r'-?\d+$', x):
        return int(x)
    return x
# Exception hierarchy: PenmanError is the root; EncodeError and
# DecodeError below specialize it for each serialization direction.
class PenmanError(Exception):
    """Base class for errors in the Penman package."""
class EncodeError(PenmanError):
    """Raised when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
    """Raised when decoding PENMAN-notation fails."""

    def __init__(self, *args, **kwargs):
        # Python 2 doesn't allow keyword-only parameters after *args
        # (e.g. ``def __init__(self, *args, string=None, ...)``), so
        # extract them from **kwargs manually.
        string = kwargs.pop('string', None)
        pos = kwargs.pop('pos', None)
        super(DecodeError, self).__init__(*args, **kwargs)
        self.string = string  # the input being decoded
        self.pos = pos  # int offset or slice where decoding failed

    def __str__(self):
        if isinstance(self.pos, slice):
            location = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
        else:
            location = ' at position {}'.format(self.pos)
        return Exception.__str__(self) + location
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    return cls(**kwargs).encode(g, top=top)
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # bug fix: *triples* was previously accepted but never forwarded to
    # iterdecode (unlike loads()), so triple-conjunction input was
    # always parsed as PENMAN
    if hasattr(source, 'read'):
        return list(decode(source.read(), triples=triples))
    else:
        with open(source) as fh:
            return list(decode(fh.read(), triples=triples))
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    return list(cls(**kwargs).iterdecode(string, triples=triples))
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    serialized = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if hasattr(file, 'write'):
        # already an open file-like object; the caller owns it
        print(serialized, file=file)
        return
    with open(file, 'w') as out:
        print(serialized, file=out)
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the string of serialized graphs, separated by blank lines
    """
    codec = cls(**kwargs)
    return '\n\n'.join(codec.encode(g, triples=triples) for g in graphs)
def _main():
    """Command-line entry point: read graphs, convert, and re-serialize."""
    import sys
    from docopt import docopt
    args = docopt(USAGE, version='Penman {}'.format(__version__))
    infile = args['--input'] or sys.stdin
    outfile = args['--output'] or sys.stdout
    codec = AMRCodec if args['--amr'] else PENMANCodec
    indent = True
    if args['--indent']:
        if args['--indent'].lower() in ("no", "none", "false"):
            indent = False
        else:
            try:
                indent = int(args['--indent'])
                if indent < 0:
                    raise ValueError
            except ValueError:
                # fix: the original two-part literal rendered with a
                # double space ("a  positive"), and 0 is in fact
                # accepted, so say "non-negative"
                sys.exit('error: --indent value must be "no" or a '
                         'non-negative integer')
    data = load(infile, cls=codec)
    dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)
if __name__ == '__main__':
_main()
|
goodmami/penman
|
penman.py
|
encode
|
python
|
def encode(g, top=None, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
return codec.encode(g, top=top)
|
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized graph; if
unset, the original top of *g* is used
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L812-L830
|
[
"def encode(self, g, top=None, triples=False):\n \"\"\"\n Serialize the graph *g* from *top* to PENMAN notation.\n\n Args:\n g: the Graph object\n top: the node identifier for the top of the serialized\n graph; if unset, the original top of *g* is used\n triples: if True, serialize as a conjunction of logical triples\n Returns:\n the PENMAN-serialized string of the Graph *g*\n Example:\n\n >>> codec = PENMANCodec()\n >>> codec.encode(Graph([('h', 'instance', 'hi')]))\n (h / hi)\n >>> codec.encode(Graph([('h', 'instance', 'hi')]),\n ... triples=True)\n instance(h, hi)\n \"\"\"\n if len(g.triples()) == 0:\n raise EncodeError('Cannot encode empty graph.')\n if triples:\n return self._encode_triple_conjunction(g, top=top)\n else:\n return self._encode_penman(g, top=top)\n"
] |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
    """Return *triples* unchanged, preserving their original order."""
    return triples
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    Triples whose ``inverted`` attribute is falsy (``False`` or the
    Triple default of ``None``) sort before inverted ones.  The sort is
    stable, so relative order within each group is preserved.

    Args:
        triples: an iterable of Triple objects
    Returns:
        a list of the triples with non-inverted triples first
    """
    # bool() guards against a TypeError: Triple.inverted defaults to
    # None, and Python 3 cannot order None against True/False.
    return sorted(triples, key=lambda t: bool(t.inverted))
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the sorting
    is alphabetic.
    """
    def _relation_key(triple):
        # Split the relation into digit and non-digit runs so that,
        # e.g., ARG2 sorts before ARG10.
        chunks = re.split(r'([0-9]+)', triple.relation or '')
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in chunks]
    return sorted(triples, key=_relation_key)
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
    """
    Create a Graph from *triples* considering codec configuration.

    The Graph class does not know about information in the codec,
    so if Graph instantiation depends on special `TYPE_REL` or
    `TOP_VAR` values, use this function instead of instantiating
    a Graph object directly. This is also where edge
    normalization (de-inversion) and value type conversion occur
    (via handle_triple()).

    Args:
        triples: an iterable of (lhs, relation, rhs) triples
        top: node identifier of the top node
    Returns:
        a Graph object
    """
    # BUG FIX: *triples* is documented as "an iterable" but was
    # indexed (triples[0][0]) and traversed, which fails for
    # generators; materialize it first.
    triples = list(triples)
    inferred_top = triples[0][0] if triples else None
    ts = []
    for triple in triples:
        if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
            # A special TOP triple overrides the inferred top.
            inferred_top = triple[2]
        else:
            ts.append(self.handle_triple(*triple))
    # Normalize the explicit *top* value the same way other triple
    # values are normalized (casting, etc.).
    top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
    return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
    """
    Parse a conjunction of logical triples (e.g.
    ``instance(h, hi) ^ ARG0(h, i)``) from *s* starting at *pos*.

    Returns:
        ((start, end), (top, nodes, edges)) where *nodes* holds the
        TYPE_REL triples and *edges* all other triples.
    """
    top, nodes, edges = None, [], []
    start = None
    while True:
        # Each conjunct has the shape: relation(var, target)
        m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
        if start is None:
            start = m.start(1)  # span start of the first conjunct
        pos, rel = m.end(0), m.group(1)
        m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
        pos = m.end(0)
        m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
        pos, var = m.end(0), m.group(1).strip()
        m = _regex(self.COMMA_RE, s, pos, '","')
        pos = m.end(0)
        if rel == self.TYPE_REL:
            m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
        else:
            if s[pos] == '"':
                m = _regex(self.STRING_RE, s, pos, 'a quoted string')
            else:
                m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
        pos, tgt = m.end(0), m.group(1)
        if var == self.TOP_VAR and rel == self.TOP_REL:
            top = tgt  # the special TOP triple selects the top node
        elif rel == self.TYPE_REL:
            nodes.append((var, rel, tgt))
        else:
            edges.append((var, rel, tgt))
        m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
        pos = m.end(1)
        # '^' joins conjuncts; anything else ends this conjunction.
        if m.end(0) < len(s) and s[m.end(0)] == '^':
            pos = m.end(0) + 1
        else:
            break
    if top is None and nodes:
        top = nodes[0][0]  # default top: variable of the first node
    return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
    """
    Recursively parse one PENMAN node ``(var / type :rel tgt ...)``
    from *s* starting at *pos*.

    Returns:
        ((start, end), (var, nodes, edges)) where *nodes* are the
        (var, TYPE_REL, nodetype) triples gathered from this node and
        all nested nodes, and *edges* are all other triples.
    """
    nodes, edges = [], []
    strlen = len(s)
    m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
    start, pos = m.start(1), m.end(0)
    m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
    pos, var = m.end(0), m.group(1).strip()
    nodetype = None
    while pos < strlen and s[pos] != ')':
        # node type
        if s[pos] == '/':
            pos = self.SPACING_RE.match(s, pos=pos+1).end()
            m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
            pos, nodetype = m.end(0), m.group(1)
        # relation
        elif s[pos] == ':':
            m = _regex(self.RELATION_RE, s, pos, 'a relation')
            pos, rel = m.end(0), m.group(1)
            # node value: recurse and splice in the sub-node's triples
            if s[pos] == '(':
                span, data = self._decode_penman_node(s, pos=pos)
                pos = span[1]
                subtop, subnodes, subedges = data
                nodes.extend(subnodes)
                edges.append((var, rel, subtop))
                edges.extend(subedges)
            # string or other atom value
            else:
                if s[pos] == '"':
                    m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                    pos, value = m.end(0), m.group(1)
                else:
                    m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
                    pos, value = m.end(0), m.group(1)
                edges.append((var, rel, value))
        elif s[pos].isspace():
            pos += 1
        # error
        else:
            raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
    m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
    pos = m.end(1)
    # This node's own type triple is prepended so callers can infer
    # the top node from the first entry.
    nodes = [(var, self.TYPE_REL, nodetype)] + nodes
    return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
    """
    Walk graph g and find a spanning dag, then serialize the result.

    First, depth-first traversal of preferred orientations (whether
    true or inverted) to create graph p.
    If any triples remain, select the first remaining triple whose
    source in the dispreferred orientation exists in p, where
    'first' is determined by the order of inserted nodes (i.e. a
    topological sort). Add this triple, then repeat the depth-first
    traversal of preferred orientations from its target. Repeat
    until no triples remain, or raise an error if there are no
    candidates in the dispreferred orientation (which likely means
    the graph is disconnected).
    """
    if top is None:
        top = g.top
    remaining = set(g.triples())
    variables = g.variables()
    store = defaultdict(lambda: ([], []))  # (preferred, dispreferred)
    for t in g.triples():
        if t.inverted:
            store[t.target][0].append(t)
            store[t.source][1].append(Triple(*t, inverted=False))
        else:
            store[t.source][0].append(t)
            store[t.target][1].append(Triple(*t, inverted=True))
    p = defaultdict(list)  # spanning dag: source var -> outgoing triples
    topolist = [top]  # node insertion order, used for flip-candidate search
    def _update(t):
        # Commit *t* to the spanning dag; return its target when that
        # target is a variable still worth exploring, else None.
        src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
        p[src].append(t)
        remaining.remove(t)
        if tgt in variables and t.relation != self.TYPE_REL:
            topolist.append(tgt)
            return tgt
        return None
    def _explore_preferred(src):
        # Depth-first traversal following preferred orientations only.
        ts = store.get(src, ([], []))[0]
        for t in ts:
            if t in remaining:
                tgt = _update(t)
                if tgt is not None:
                    _explore_preferred(tgt)
        ts[:] = []  # clear explored list
    _explore_preferred(top)
    while remaining:
        # Fall back to a dispreferred (flipped) orientation to reach
        # the parts of the graph not covered yet.
        flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
        for fc in flip_candidates:
            fc[:] = [c for c in fc if c in remaining]  # clear superfluous
        if not any(len(fc) > 0 for fc in flip_candidates):
            raise EncodeError('Invalid graph; possibly disconnected.')
        c = next(c for fc in flip_candidates for c in fc)
        tgt = _update(c)
        if tgt is not None:
            _explore_preferred(tgt)
    return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
    """
    Serialize spanning dag *g* from node *src* into a PENMAN string.

    *offset* is the current indentation column (used when self.indent
    is enabled) and *seen* guards against revisiting nodes, so
    re-entrant nodes are printed as bare variables.
    """
    indent = self.indent
    if src not in g or len(g.get(src, [])) == 0 or src in seen:
        return src  # terminal or already-printed node: bare token
    seen.add(src)
    branches = []
    outedges = self.relation_sort(g[src])
    head = '({}'.format(src)
    if indent is True:
        # Adaptive indent: align under the variable name.
        offset += len(head) + 1  # + 1 for space after src (added later)
    elif indent is not None and indent is not False:
        # Fixed indent: a constant number of spaces per nesting level.
        offset += indent
    for t in outedges:
        if t.relation == self.TYPE_REL:
            if t.target is not None:
                # node types always come first
                branches = ['/ {}'.format(t.target)] + branches
        else:
            if t.inverted:
                # Re-invert for display: swap endpoints back and add '-of'.
                tgt = t.source
                rel = self.invert_relation(t.relation)
            else:
                tgt = t.target
                rel = t.relation or ''
            # Nested nodes under adaptive indent start after ':rel '.
            inner_offset = (len(rel) + 2) if indent is True else 0
            branch = self._layout(g, tgt, offset + inner_offset, seen)
            branches.append(':{} {}'.format(rel, branch))
    if branches:
        head += ' '
    delim = ' ' if (indent is None or indent is False) else '\n'
    tail = (delim + (' ' * offset)).join(branches) + ')'
    return head + tail
def _encode_triple_conjunction(self, g, top=None):
    """
    Serialize graph *g* as a ``^``-joined conjunction of triples.
    """
    if top is None:
        top = g.top
    triples = list(g.triples())
    if self.TOP_VAR is not None and top is not None:
        # Codecs with an explicit top variable emit a TOP triple first.
        triples.insert(0, (self.TOP_VAR, self.TOP_REL, top))
    return ' ^\n'.join(
        '{}({}, {})'.format(t[1], t[0], t[2]) for t in triples
    )
class AMRCodec(PENMANCodec):
    """
    An AMR codec for graphs in PENMAN notation.

    Restricts the generic PENMAN grammar to AMR conventions:
    variables are lowercase letters optionally followed by digits, a
    node's first relation must be its type (``/``), and anonymous
    relations are disallowed. It also handles AMR's irregular
    relation (de)inversions such as domain/mod.
    """
    TYPE_REL = 'instance'
    TOP_VAR = None
    TOP_REL = 'top'
    # vars: [a-z]+\d* ; first relation must be node type
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
    NODETYPE_RE = PENMANCodec.ATOM_RE
    VAR_RE = re.compile(r'([a-z]+\d*)')
    # only non-anonymous relations
    RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
    # relations whose inversion is irregular; None forbids inversion
    _inversions = {
        TYPE_REL: None,  # don't allow inverted types
        'domain': 'mod',
        'consist-of': 'consist-of-of',
        'prep-on-behalf-of': 'prep-on-behalf-of-of',
        'prep-out-of': 'prep-out-of-of',
    }
    # inverted relations whose deinversion is irregular
    _deinversions = {
        'mod': 'domain',
    }
    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        return (
            relation in self._deinversions or
            (relation.endswith('-of') and relation not in self._inversions)
        )
    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.

        Raises:
            PenmanError: if *relation* may not be (de)inverted
        """
        if self.is_relation_inverted(relation):
            rel = self._deinversions.get(relation, relation[:-3])
        else:
            rel = self._inversions.get(relation, relation + '-of')
        if rel is None:
            raise PenmanError(
                'Cannot (de)invert {}; not allowed'.format(relation)
            )
        return rel
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
    """Container for Graph edges and node attributes."""
    def __new__(cls, source, relation, target, inverted=None):
        t = super(Triple, cls).__new__(
            cls, source, relation, target
        )
        # *inverted* records the orientation observed during parsing
        # (True/False) or None when unknown. NOTE(review): it is a
        # plain attribute, so it does not participate in tuple
        # equality/hashing, and tuple operations (slicing, pickling)
        # will drop it.
        t.inverted = inverted
        return t
class Graph(object):
    """
    A basic class for modeling a rooted, directed acyclic graph.

    A Graph is defined by a list of triples, which can be divided into
    two parts: a list of graph edges where both the source and target
    are node identifiers, and a list of node attributes where only the
    source is a node identifier and the target is a constant. These
    lists can be obtained via the Graph.triples(), Graph.edges(), and
    Graph.attributes() methods.
    """
    def __init__(self, data=None, top=None):
        """
        Create a Graph from an iterable of triples.

        Args:
            data: an iterable of triples (Triple objects or 3-tuples)
            top: the node identifier of the top node; if unspecified,
                the source of the first triple is used
        Example:
            >>> Graph([
            ...     ('b', 'instance', 'bark'),
            ...     ('d', 'instance', 'dog'),
            ...     ('b', 'ARG1', 'd')
            ... ])
        """
        self._triples = []
        self._top = None
        if data is None:
            data = []
        else:
            data = list(data)  # make list (e.g., if its a generator)
        if data:
            # Preserve any *inverted* flag carried by Triple inputs;
            # plain 3-tuples get inverted=None.
            self._triples.extend(
                Triple(*t, inverted=getattr(t, 'inverted', None))
                for t in data
            )
            # implicit top: source of first triple
            if top is None:
                top = data[0][0]
        # Goes through the top setter, which validates the value; an
        # empty graph with no explicit top therefore raises ValueError.
        self.top = top
    def __repr__(self):
        return '<{} object (top={}) at {}>'.format(
            self.__class__.__name__,
            self.top,
            id(self)
        )
    def __str__(self):
        return PENMANCodec().encode(self)  # just use the default encoder
    @property
    def top(self):
        """
        The top variable.
        """
        return self._top
    @top.setter
    def top(self, top):
        if top not in self.variables():
            raise ValueError('top must be a valid node')
        self._top = top  # check if top is valid variable?
    def variables(self):
        """
        Return the set of variables (nonterminal node identifiers).
        """
        return set(v for v, _, _ in self._triples)
    def triples(self, source=None, relation=None, target=None):
        """
        Return triples filtered by their *source*, *relation*, or *target*.
        """
        triplematch = lambda t: (
            (source is None or source == t.source) and
            (relation is None or relation == t.relation) and
            (target is None or target == t.target)
        )
        return list(filter(triplematch, self._triples))
    def edges(self, source=None, relation=None, target=None):
        """
        Return edges filtered by their *source*, *relation*, or *target*.

        Edges don't include terminal triples (node types or attributes).
        """
        edgematch = lambda e: (
            (source is None or source == e.source) and
            (relation is None or relation == e.relation) and
            (target is None or target == e.target)
        )
        variables = self.variables()
        # An edge is any triple whose target is itself a variable.
        edges = [t for t in self._triples if t.target in variables]
        return list(filter(edgematch, edges))
    def attributes(self, source=None, relation=None, target=None):
        """
        Return attributes filtered by their *source*, *relation*, or *target*.

        Attributes don't include triples where the target is a nonterminal.
        """
        attrmatch = lambda a: (
            (source is None or source == a.source) and
            (relation is None or relation == a.relation) and
            (target is None or target == a.target)
        )
        variables = self.variables()
        attrs = [t for t in self.triples() if t.target not in variables]
        return list(filter(attrmatch, attrs))
    def reentrancies(self):
        """
        Return a mapping of variables to their re-entrancy count.

        A re-entrancy is when more than one edge selects a node as its
        target. These graphs are rooted, so the top node always has an
        implicit entrancy. Only nodes with re-entrancies are reported,
        and the count is only for the entrant edges beyond the first.
        Also note that these counts are for the interpreted graph, not
        for the linearized form, so inverted edges are always
        re-entrant.
        """
        entrancies = defaultdict(int)
        entrancies[self.top] += 1  # implicit entrancy to top
        for t in self.edges():
            entrancies[t.target] += 1
        return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
def _regex(x, s, pos, msg):
    """
    Match compiled pattern *x* in *s* at *pos*, or raise DecodeError
    describing the expected token *msg*.
    """
    match = x.match(s, pos=pos)
    if match is None:
        raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
    return match
def _default_cast(x):
    """
    Cast numeric-looking strings to int or float; pass everything
    else through unchanged.
    """
    if not isinstance(x, basestring):
        return x
    if x.startswith('"'):
        return x  # quoted constant; keep as-is (strip quotes?)
    if re.match(
            r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
        return float(x)
    if re.match(r'-?\d+$', x):
        return int(x)
    return x
# Exception hierarchy: all package errors derive from PenmanError so
# callers can catch a single base class.
class PenmanError(Exception):
    """Base class for errors in the Penman package."""
class EncodeError(PenmanError):
    """Raised when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
    """Raised when decoding PENMAN-notation fails."""

    def __init__(self, *args, **kwargs):
        # Python 2 cannot mix (*args, key=val, **kwargs) in a
        # signature, so the keyword-only parameters are extracted
        # manually before delegating.
        string = kwargs.pop('string', None)
        pos = kwargs.pop('pos', None)
        super(DecodeError, self).__init__(*args, **kwargs)
        self.string = string
        self.pos = pos

    def __str__(self):
        if isinstance(self.pos, slice):
            loc = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
        else:
            loc = ' at position {}'.format(self.pos)
        return Exception.__str__(self) + loc
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object.

    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    return cls(**kwargs).decode(s)
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # BUG FIX: the *triples* flag was accepted but never forwarded to
    # iterdecode(), so triple-conjunction input could not be loaded
    # (loads() already forwards it).
    if hasattr(source, 'read'):
        return list(decode(source.read(), triples=triples))
    else:
        with open(source) as fh:
            return list(decode(fh.read(), triples=triples))
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    return list(cls(**kwargs).iterdecode(string, triples=triples))
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    text = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if not hasattr(file, 'write'):
        # *file* is a path; open it ourselves and close on exit.
        with open(file, 'w') as fh:
            print(text, file=fh)
    else:
        print(text, file=file)
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        triples: if True, write graphs as triples instead of as PENMAN
    Returns:
        the string of serialized graphs, separated by blank lines
    """
    codec = cls(**kwargs)
    return '\n\n'.join(
        codec.encode(g, triples=triples) for g in graphs
    )
def _main():
    """
    Command-line entry point: load graphs per the USAGE options and
    re-serialize them (optionally as triples, with the AMR codec, or
    with custom indentation).
    """
    import sys
    from docopt import docopt
    args = docopt(USAGE, version='Penman {}'.format(__version__))
    infile = args['--input'] or sys.stdin
    outfile = args['--output'] or sys.stdout
    codec = AMRCodec if args['--amr'] else PENMANCodec
    indent = True
    if args['--indent']:
        if args['--indent'].lower() in ("no", "none", "false"):
            indent = False
        else:
            try:
                indent = int(args['--indent'])
                if indent < 0:
                    raise ValueError
            except ValueError:
                # BUG FIX: the two concatenated fragments previously
                # produced a doubled space ("a  positive integer").
                sys.exit('error: --indent value must be "no" or a '
                         'positive integer')
    data = load(infile, cls=codec)
    dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)
if __name__ == '__main__':
_main()
|
goodmami/penman
|
penman.py
|
load
|
python
|
def load(source, triples=False, cls=PENMANCodec, **kwargs):
decode = cls(**kwargs).iterdecode
if hasattr(source, 'read'):
return list(decode(source.read()))
else:
with open(source) as fh:
return list(decode(fh.read()))
|
Deserialize a list of PENMAN-encoded graphs from *source*.
Args:
source: a filename or file-like object to read from
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L833-L850
|
[
"def iterdecode(self, s, triples=False):\n \"\"\"\n Deserialize PENMAN-notation string *s* into its Graph objects.\n\n Args:\n s: a string containing zero or more PENMAN-serialized graphs\n triples: if True, treat *s* as a conjunction of logical triples\n Yields:\n valid Graph objects described by *s*\n Example:\n\n >>> codec = PENMANCodec()\n >>> list(codec.iterdecode('(h / hello)(g / goodbye)'))\n [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]\n >>> list(codec.iterdecode(\n ... 'instance(h, hello)\\n'\n ... 'instance(g, goodbye)'\n ... ))\n [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]\n \"\"\"\n pos, strlen = 0, len(s)\n while pos < strlen:\n if s[pos] == '#':\n while pos < strlen and s[pos] != '\\n':\n pos += 1\n elif triples or s[pos] == '(':\n try:\n if triples:\n span, data = self._decode_triple_conjunction(\n s, pos=pos\n )\n else:\n span, data = self._decode_penman_node(s, pos=pos)\n except (IndexError, DecodeError):\n # don't re-raise below for more robust parsing, but\n # for now, raising helps with debugging bad input\n raise\n pos += 1\n else:\n top, nodes, edges = data\n yield self.triples_to_graph(nodes + edges, top=top)\n pos = span[1]\n else:\n pos += 1\n"
] |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
    """
    Return *triples* unchanged (the identity ordering for encoding).
    """
    return triples
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    Relies on False < True: non-inverted triples sort ahead of
    inverted ones, and the sort is otherwise stable.
    """
    return sorted(triples, key=lambda triple: triple.inverted)
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the
    sorting is alphabetic (e.g. ARG2 before ARG10).
    """
    def _key(triple):
        pieces = re.split(r'([0-9]+)', triple.relation or '')
        return [int(piece) if piece.isdigit() else piece
                for piece in pieces]
    return sorted(triples, key=_key)
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
class AMRCodec(PENMANCodec):
"""
An AMR codec for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
# vars: [a-z]+\d* ; first relation must be node type
NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
NODETYPE_RE = PENMANCodec.ATOM_RE
VAR_RE = re.compile(r'([a-z]+\d*)')
# only non-anonymous relations
RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
_inversions = {
TYPE_REL: None, # don't allow inverted types
'domain': 'mod',
'consist-of': 'consist-of-of',
'prep-on-behalf-of': 'prep-on-behalf-of-of',
'prep-out-of': 'prep-out-of-of',
}
_deinversions = {
'mod': 'domain',
}
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return (
relation in self._deinversions or
(relation.endswith('-of') and relation not in self._inversions)
)
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
rel = self._deinversions.get(relation, relation[:-3])
else:
rel = self._inversions.get(relation, relation + '-of')
if rel is None:
raise PenmanError(
'Cannot (de)invert {}; not allowed'.format(relation)
)
return rel
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
"""Container for Graph edges and node attributes."""
def __new__(cls, source, relation, target, inverted=None):
t = super(Triple, cls).__new__(
cls, source, relation, target
)
t.inverted = inverted
return t
class Graph(object):
"""
A basic class for modeling a rooted, directed acyclic graph.
A Graph is defined by a list of triples, which can be divided into
two parts: a list of graph edges where both the source and target
are node identifiers, and a list of node attributes where only the
source is a node identifier and the target is a constant. These
lists can be obtained via the Graph.triples(), Graph.edges(), and
Graph.attributes() methods.
"""
def __init__(self, data=None, top=None):
"""
Create a Graph from an iterable of triples.
Args:
data: an iterable of triples (Triple objects or 3-tuples)
top: the node identifier of the top node; if unspecified,
the source of the first triple is used
Example:
>>> Graph([
... ('b', 'instance', 'bark'),
... ('d', 'instance', 'dog'),
... ('b', 'ARG1', 'd')
... ])
"""
self._triples = []
self._top = None
if data is None:
data = []
else:
data = list(data) # make list (e.g., if its a generator)
if data:
self._triples.extend(
Triple(*t, inverted=getattr(t, 'inverted', None))
for t in data
)
# implicit top: source of first triple
if top is None:
top = data[0][0]
self.top = top
def __repr__(self):
return '<{} object (top={}) at {}>'.format(
self.__class__.__name__,
self.top,
id(self)
)
def __str__(self):
return PENMANCodec().encode(self) # just use the default encoder
@property
def top(self):
"""
The top variable.
"""
return self._top
@top.setter
def top(self, top):
if top not in self.variables():
raise ValueError('top must be a valid node')
self._top = top # check if top is valid variable?
def variables(self):
"""
Return the list of variables (nonterminal node identifiers).
"""
return set(v for v, _, _ in self._triples)
def triples(self, source=None, relation=None, target=None):
"""
Return triples filtered by their *source*, *relation*, or *target*.
"""
triplematch = lambda t: (
(source is None or source == t.source) and
(relation is None or relation == t.relation) and
(target is None or target == t.target)
)
return list(filter(triplematch, self._triples))
def edges(self, source=None, relation=None, target=None):
"""
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
"""
edgematch = lambda e: (
(source is None or source == e.source) and
(relation is None or relation == e.relation) and
(target is None or target == e.target)
)
variables = self.variables()
edges = [t for t in self._triples if t.target in variables]
return list(filter(edgematch, edges))
def attributes(self, source=None, relation=None, target=None):
"""
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
"""
attrmatch = lambda a: (
(source is None or source == a.source) and
(relation is None or relation == a.relation) and
(target is None or target == a.target)
)
variables = self.variables()
attrs = [t for t in self.triples() if t.target not in variables]
return list(filter(attrmatch, attrs))
def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
def _regex(x, s, pos, msg):
m = x.match(s, pos=pos)
if m is None:
raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
return m
def _default_cast(x):
if isinstance(x, basestring):
if x.startswith('"'):
x = x # strip quotes?
elif re.match(
r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
x = float(x)
elif re.match(r'-?\d+$', x):
x = int(x)
return x
class PenmanError(Exception):
"""Base class for errors in the Penman package."""
class EncodeError(PenmanError):
"""Raises when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
"""Raised when decoding PENMAN-notation fails."""
def __init__(self, *args, **kwargs):
# Python2 doesn't allow parameters like:
# (*args, key=val, **kwargs)
# so do this manaully.
string = pos = None
if 'string' in kwargs:
string = kwargs['string']
del kwargs['string']
if 'pos' in kwargs:
pos = kwargs['pos']
del kwargs['pos']
super(DecodeError, self).__init__(*args, **kwargs)
self.string = string
self.pos = pos
def __str__(self):
if isinstance(self.pos, slice):
loc = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
else:
loc = ' at position {}'.format(self.pos)
return Exception.__str__(self) + loc
def decode(s, cls=PENMANCodec, **kwargs):
"""
Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the Graph object described by *s*
Example:
>>> decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
"""
codec = cls(**kwargs)
return codec.decode(s)
def encode(g, top=None, cls=PENMANCodec, **kwargs):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized graph; if
unset, the original top of *g* is used
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
"""
codec = cls(**kwargs)
return codec.encode(g, top=top)
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
"""
Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
"""
codec = cls(**kwargs)
return list(codec.iterdecode(string, triples=triples))
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
"""
Serialize each graph in *graphs* to PENMAN and write to *file*.
Args:
graphs: an iterable of Graph objects
file: a filename or file-like object to write to
triples: if True, write graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
"""
text = dumps(graphs, triples=triples, cls=cls, **kwargs)
if hasattr(file, 'write'):
print(text, file=file)
else:
with open(file, 'w') as fh:
print(text, file=fh)
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
"""
Serialize each graph in *graphs* to the PENMAN format.
Args:
graphs: an iterable of Graph objects
triples: if True, write graphs as triples instead of as PENMAN
Returns:
the string of serialized graphs
"""
codec = cls(**kwargs)
strings = [codec.encode(g, triples=triples) for g in graphs]
return '\n\n'.join(strings)
def _main():
import sys
from docopt import docopt
args = docopt(USAGE, version='Penman {}'.format(__version__))
infile = args['--input'] or sys.stdin
outfile = args['--output'] or sys.stdout
codec = AMRCodec if args['--amr'] else PENMANCodec
indent = True
if args['--indent']:
if args['--indent'].lower() in ("no", "none", "false"):
indent = False
else:
try:
indent = int(args['--indent'])
if indent < 0:
raise ValueError
except ValueError:
sys.exit('error: --indent value must be "no" or a '
' positive integer')
data = load(infile, cls=codec)
dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)
if __name__ == '__main__':
_main()
|
goodmami/penman
|
penman.py
|
loads
|
python
|
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
return list(codec.iterdecode(string, triples=triples))
|
Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L853-L866
|
[
"def iterdecode(self, s, triples=False):\n \"\"\"\n Deserialize PENMAN-notation string *s* into its Graph objects.\n\n Args:\n s: a string containing zero or more PENMAN-serialized graphs\n triples: if True, treat *s* as a conjunction of logical triples\n Yields:\n valid Graph objects described by *s*\n Example:\n\n >>> codec = PENMANCodec()\n >>> list(codec.iterdecode('(h / hello)(g / goodbye)'))\n [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]\n >>> list(codec.iterdecode(\n ... 'instance(h, hello)\\n'\n ... 'instance(g, goodbye)'\n ... ))\n [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]\n \"\"\"\n pos, strlen = 0, len(s)\n while pos < strlen:\n if s[pos] == '#':\n while pos < strlen and s[pos] != '\\n':\n pos += 1\n elif triples or s[pos] == '(':\n try:\n if triples:\n span, data = self._decode_triple_conjunction(\n s, pos=pos\n )\n else:\n span, data = self._decode_penman_node(s, pos=pos)\n except (IndexError, DecodeError):\n # don't re-raise below for more robust parsing, but\n # for now, raising helps with debugging bad input\n raise\n pos += 1\n else:\n top, nodes, edges = data\n yield self.triples_to_graph(nodes + edges, top=top)\n pos = span[1]\n else:\n pos += 1\n"
] |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
    """Identity ordering: hand back *triples* exactly as given."""
    # This is the default relation_sort used by PENMANCodec; no copy is
    # made, so the caller's list object is returned unchanged.
    return triples
def out_first_order(triples):
    """Sort *triples* so non-inverted (outward) edges come before inverted ones."""
    # False sorts before True, so triples with inverted=False lead; the
    # sort is stable, preserving relative order within each group.
    def _inverted_flag(triple):
        return triple.inverted
    return sorted(triples, key=_inverted_flag)
def alphanum_order(triples):
    """
    Sort *triples* by relation name, numerically where possible.

    Embedded runs of digits compare as integers (so :ARG2 < :ARG10),
    while the surrounding text compares alphabetically.
    """
    def _alphanum_key(triple):
        # Split the relation into alternating text/digit chunks and
        # convert the digit chunks to ints for numeric comparison.
        rel = triple.relation or ''
        return [
            int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split(r'([0-9]+)', rel)
        ]
    return sorted(triples, key=_alphanum_key)
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
    """
    Create a codec with the given serialization options.

    Args:
        indent: True for adaptive indentation; False or None for a
            single-line serialization; a non-negative integer for
            that many spaces per nesting level
        relation_sort: callable applied to each node's relations
            when encoding; by default the original order is kept
    """
    self.indent = indent
    self.relation_sort = relation_sort
def decode(self, s, triples=False):
    """
    Deserialize PENMAN-notation string *s* into its Graph object.

    Args:
        s: a string containing a single PENMAN-serialized graph
        triples: if True, treat *s* as a conjunction of logical triples
    Returns:
        the Graph object described by *s*
    Raises:
        DecodeError: if *s* is not a valid serialization
    Example:
        >>> codec = PENMANCodec()
        >>> codec.decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
        >>> codec.decode(
        ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
        ...     triples=True
        ... )
        <Graph object (top=b) at ...>
    """
    try:
        if triples:
            span, data = self._decode_triple_conjunction(s)
        else:
            span, data = self._decode_penman_node(s)
    except IndexError:
        # The parsers index into *s* directly; running off the end
        # means the input was truncated.
        raise DecodeError(
            'Unexpected end of string.', string=s, pos=len(s)
        )
    top, nodes, edges = data
    # Normalization (de-inversion, value casting) happens here.
    return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
    """
    Deserialize PENMAN-notation string *s* into its Graph objects.

    Args:
        s: a string containing zero or more PENMAN-serialized graphs
        triples: if True, treat *s* as a conjunction of logical triples
    Yields:
        valid Graph objects described by *s*
    Example:
        >>> codec = PENMANCodec()
        >>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
        [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
        >>> list(codec.iterdecode(
        ...     'instance(h, hello)\n'
        ...     'instance(g, goodbye)'
        ... ))
        [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
    """
    pos, strlen = 0, len(s)
    while pos < strlen:
        if s[pos] == '#':
            # Skip comment lines up to (not past) the newline.
            while pos < strlen and s[pos] != '\n':
                pos += 1
        elif triples or s[pos] == '(':
            try:
                if triples:
                    span, data = self._decode_triple_conjunction(
                        s, pos=pos
                    )
                else:
                    span, data = self._decode_penman_node(s, pos=pos)
            except (IndexError, DecodeError):
                # don't re-raise below for more robust parsing, but
                # for now, raising helps with debugging bad input
                raise
                # NOTE(review): unreachable while the raise above stays;
                # kept as the resync step if re-raising is removed.
                pos += 1
            else:
                top, nodes, edges = data
                yield self.triples_to_graph(nodes + edges, top=top)
                # Resume scanning just past the decoded span.
                pos = span[1]
        else:
            # Whitespace or other inter-graph filler.
            pos += 1
def encode(self, g, top=None, triples=False):
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized
            graph; if unset, the original top of *g* is used
        triples: if True, serialize as a conjunction of logical triples
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Raises:
        EncodeError: if *g* has no triples
    Example:
        >>> codec = PENMANCodec()
        >>> codec.encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
        >>> codec.encode(Graph([('h', 'instance', 'hi')]),
        ...              triples=True)
        instance(h, hi)
    """
    if len(g.triples()) == 0:
        raise EncodeError('Cannot encode empty graph.')
    if triples:
        return self._encode_triple_conjunction(g, top=top)
    return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
    """Return True if *relation* is inverted (i.e. ends with ``-of``)."""
    if not relation:
        # Pass falsy values (None, '') through unchanged, matching the
        # short-circuit behavior of ``relation and ...``.
        return relation
    return relation.endswith('-of')
def invert_relation(self, relation):
    """Invert or deinvert *relation* by toggling the ``-of`` suffix."""
    if self.is_relation_inverted(relation):
        # Deinvert: strip '-of'; an empty remainder becomes None
        # (the anonymous relation).
        base = relation[:-3]
        return base if base else None
    # Invert: append '-of', treating None as the empty relation.
    return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
    """
    Process triples before they are added to the graph.

    Note that *lhs* and *rhs* are as they originally appeared, and
    may be inverted. Inversions are detected by
    is_relation_inverted() and de-inverted by invert_relation().

    By default, this function:
     * removes initial colons on relations
     * de-inverts all inverted relations
     * sets empty relations to `None`
     * casts numeric string sources and targets to their numeric
       types (e.g. float, int)

    Args:
        lhs: the left hand side of an observed triple
        relation: the triple relation (possibly inverted)
        rhs: the right hand side of an observed triple
    Returns:
        The processed (source, relation, target) triple. By default,
        it is returned as a Triple object.
    """
    relation = relation.replace(':', '', 1)  # remove leading :
    if self.is_relation_inverted(relation):  # deinvert
        source, target, inverted = rhs, lhs, True
        relation = self.invert_relation(relation)
    else:
        source, target, inverted = lhs, rhs, False
    # Cast numeric-looking strings to int/float; other values pass through.
    source = _default_cast(source)
    target = _default_cast(target)
    if relation == '':  # set empty relations to None
        relation = None
    return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
    """
    Create a Graph from *triples* considering codec configuration.

    The Graph class does not know about information in the codec,
    so if Graph instantiation depends on special `TYPE_REL` or
    `TOP_VAR` values, use this function instead of instantiating
    a Graph object directly. This is also where edge
    normalization (de-inversion) and value type conversion occur
    (via handle_triple()).

    Args:
        triples: an iterable of (lhs, relation, rhs) triples
        top: node identifier of the top node
    Returns:
        a Graph object
    """
    # Fall back to the source of the first triple if no explicit
    # TOP triple is found below.
    inferred_top = triples[0][0] if triples else None
    ts = []
    for triple in triples:
        if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
            # A special TOP triple names the top instead of being
            # stored as graph content.
            inferred_top = triple[2]
        else:
            ts.append(self.handle_triple(*triple))
    # Run the explicit *top* through handle_triple too, so it gets the
    # same value casting as ordinary targets.
    top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
    return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
    """
    Parse a triple conjunction like ``rel(var, tgt) ^ rel(var, tgt)``.

    Args:
        s: the string being decoded
        pos: the offset at which to start parsing
    Returns:
        ``((start, end), (top, nodes, edges))`` where *start*/*end*
        delimit the consumed span of *s*, *nodes* are TYPE_REL
        triples, and *edges* are all other triples.
    Raises:
        DecodeError: via _regex() on any malformed component
    """
    top, nodes, edges = None, [], []
    start = None
    while True:
        # One ``rel(var, target)`` term per iteration.
        m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
        if start is None:
            start = m.start(1)  # remember where the conjunction began
        pos, rel = m.end(0), m.group(1)
        m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
        pos = m.end(0)
        m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
        pos, var = m.end(0), m.group(1).strip()
        m = _regex(self.COMMA_RE, s, pos, '","')
        pos = m.end(0)
        # The target's grammar depends on the relation kind.
        if rel == self.TYPE_REL:
            m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
        else:
            if s[pos] == '"':
                m = _regex(self.STRING_RE, s, pos, 'a quoted string')
            else:
                m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
        pos, tgt = m.end(0), m.group(1)
        # Route the triple: TOP marker, node type, or ordinary edge.
        if var == self.TOP_VAR and rel == self.TOP_REL:
            top = tgt
        elif rel == self.TYPE_REL:
            nodes.append((var, rel, tgt))
        else:
            edges.append((var, rel, tgt))
        m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
        pos = m.end(1)
        # Continue only if a '^' conjunction operator follows.
        if m.end(0) < len(s) and s[m.end(0)] == '^':
            pos = m.end(0) + 1
        else:
            break
    if top is None and nodes:
        # No explicit TOP triple: default to the first node's variable.
        top = nodes[0][0]
    return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
    """
    Recursively parse one parenthesized PENMAN node starting at *pos*.

    Args:
        s: the string being decoded
        pos: the offset of the node's opening parenthesis
    Returns:
        ``((start, end), (var, nodes, edges))`` where *var* is this
        node's variable, *nodes* collects (var, TYPE_REL, type)
        triples for this node and all descendants (this node first),
        and *edges* collects all relation triples found.
    Raises:
        DecodeError: on any token that fits neither ':' nor '/'
    """
    nodes, edges = [], []
    strlen = len(s)
    m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
    start, pos = m.start(1), m.end(0)
    m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
    pos, var = m.end(0), m.group(1).strip()
    nodetype = None
    while pos < strlen and s[pos] != ')':
        # node type
        if s[pos] == '/':
            pos = self.SPACING_RE.match(s, pos=pos+1).end()
            m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
            pos, nodetype = m.end(0), m.group(1)
        # relation
        elif s[pos] == ':':
            m = _regex(self.RELATION_RE, s, pos, 'a relation')
            pos, rel = m.end(0), m.group(1)
            # node value
            if s[pos] == '(':
                # Recurse into the embedded node; its variable becomes
                # this relation's target, and its triples are merged in.
                span, data = self._decode_penman_node(s, pos=pos)
                pos = span[1]
                subtop, subnodes, subedges = data
                nodes.extend(subnodes)
                edges.append((var, rel, subtop))
                edges.extend(subedges)
            # string or other atom value
            else:
                if s[pos] == '"':
                    m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                    pos, value = m.end(0), m.group(1)
                else:
                    m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
                    pos, value = m.end(0), m.group(1)
                edges.append((var, rel, value))
        elif s[pos].isspace():
            pos += 1
        # error
        else:
            raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
    m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
    pos = m.end(1)
    # This node's type triple is prepended so it precedes descendants'.
    nodes = [(var, self.TYPE_REL, nodetype)] + nodes
    return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
    """
    Walk graph g and find a spanning dag, then serialize the result.

    First, depth-first traversal of preferred orientations (whether
    true or inverted) to create graph p.

    If any triples remain, select the first remaining triple whose
    source in the dispreferred orientation exists in p, where
    'first' is determined by the order of inserted nodes (i.e. a
    topological sort). Add this triple, then repeat the depth-first
    traversal of preferred orientations from its target. Repeat
    until no triples remain, or raise an error if there are no
    candidates in the dispreferred orientation (which likely means
    the graph is disconnected).
    """
    if top is None:
        top = g.top
    remaining = set(g.triples())
    variables = g.variables()
    # store maps each variable to its outgoing triples in both
    # orientations: (preferred, dispreferred).
    store = defaultdict(lambda: ([], []))  # (preferred, dispreferred)
    for t in g.triples():
        if t.inverted:
            store[t.target][0].append(t)
            store[t.source][1].append(Triple(*t, inverted=False))
        else:
            store[t.source][0].append(t)
            store[t.target][1].append(Triple(*t, inverted=True))
    # p is the spanning dag being built: serialized-source -> triples.
    p = defaultdict(list)
    # topolist records node insertion order for candidate selection.
    topolist = [top]

    def _update(t):
        # Move t from remaining into p; return its serialization
        # target if that target opens a new node to explore.
        src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
        p[src].append(t)
        remaining.remove(t)
        if tgt in variables and t.relation != self.TYPE_REL:
            topolist.append(tgt)
            return tgt
        return None

    def _explore_preferred(src):
        # Depth-first walk over preferred-orientation triples only.
        ts = store.get(src, ([], []))[0]
        for t in ts:
            if t in remaining:
                tgt = _update(t)
                if tgt is not None:
                    _explore_preferred(tgt)
        ts[:] = []  # clear explored list

    _explore_preferred(top)
    while remaining:
        # Look for a dispreferred-orientation triple reachable from
        # any node already placed, in insertion order.
        flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
        for fc in flip_candidates:
            fc[:] = [c for c in fc if c in remaining]  # clear superfluous
        if not any(len(fc) > 0 for fc in flip_candidates):
            raise EncodeError('Invalid graph; possibly disconnected.')
        c = next(c for fc in flip_candidates for c in fc)
        tgt = _update(c)
        if tgt is not None:
            _explore_preferred(tgt)
    return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
    """
    Recursively render the spanning dag *g* from node *src* as text.

    Args:
        g: mapping of serialized-source variables to their triples
           (built by _encode_penman)
        src: the variable (or terminal value) to render
        offset: current column offset used for indentation
        seen: set of variables already rendered, so re-entrant
              references are emitted as bare variables
    Returns:
        the string rendering of *src* and its subtree
    """
    indent = self.indent
    if src not in g or len(g.get(src, [])) == 0 or src in seen:
        # Terminal value or already-rendered node: just the name.
        return src
    seen.add(src)
    branches = []
    outedges = self.relation_sort(g[src])
    head = '({}'.format(src)
    if indent is True:
        offset += len(head) + 1  # + 1 for space after src (added later)
    elif indent is not None and indent is not False:
        # Fixed-width indentation per nesting level.
        offset += indent
    for t in outedges:
        if t.relation == self.TYPE_REL:
            if t.target is not None:
                # node types always come first
                branches = ['/ {}'.format(t.target)] + branches
        else:
            # Render in the orientation recorded during encoding.
            if t.inverted:
                tgt = t.source
                rel = self.invert_relation(t.relation)
            else:
                tgt = t.target
                rel = t.relation or ''
            inner_offset = (len(rel) + 2) if indent is True else 0
            branch = self._layout(g, tgt, offset + inner_offset, seen)
            branches.append(':{} {}'.format(rel, branch))
    if branches:
        head += ' '
    delim = ' ' if (indent is None or indent is False) else '\n'
    tail = (delim + (' ' * offset)).join(branches) + ')'
    return head + tail
def _encode_triple_conjunction(self, g, top=None):
    """Render *g* as ``rel(source, target)`` terms joined by `` ^``."""
    if top is None:
        top = g.top
    # When the codec uses an explicit TOP variable, emit a leading
    # TOP triple; otherwise the top is implicit in the ordering.
    prefix = []
    if self.TOP_VAR is not None and top is not None:
        prefix = [(self.TOP_VAR, self.TOP_REL, top)]
    terms = [
        '{}({}, {})'.format(t[1], t[0], t[2])
        for t in prefix + g.triples()
    ]
    return ' ^\n'.join(terms)
class AMRCodec(PENMANCodec):
"""
An AMR codec for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
# vars: [a-z]+\d* ; first relation must be node type
NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
NODETYPE_RE = PENMANCodec.ATOM_RE
VAR_RE = re.compile(r'([a-z]+\d*)')
# only non-anonymous relations
RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
_inversions = {
TYPE_REL: None, # don't allow inverted types
'domain': 'mod',
'consist-of': 'consist-of-of',
'prep-on-behalf-of': 'prep-on-behalf-of-of',
'prep-out-of': 'prep-out-of-of',
}
_deinversions = {
'mod': 'domain',
}
def is_relation_inverted(self, relation):
    """Return True if *relation* is inverted under AMR's conventions."""
    # Explicit deinversion entries (e.g. 'mod') are always inverted;
    # otherwise the '-of' suffix marks inversion unless the relation
    # is itself a listed non-inverted form (e.g. 'consist-of').
    if relation in self._deinversions:
        return True
    return relation.endswith('-of') and relation not in self._inversions
def invert_relation(self, relation):
    """Invert or deinvert *relation*, honoring AMR's special cases."""
    if self.is_relation_inverted(relation):
        # Deinvert: use the special-case table, else strip '-of'.
        result = self._deinversions.get(relation, relation[:-3])
    else:
        # Invert: use the special-case table, else append '-of'.
        result = self._inversions.get(relation, relation + '-of')
    if result is None:
        # Table entries mapped to None (e.g. the instance relation)
        # may not be (de)inverted at all.
        raise PenmanError(
            'Cannot (de)invert {}; not allowed'.format(relation)
        )
    return result
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
    """Container for Graph edges and node attributes."""

    def __new__(cls, source, relation, target, inverted=None):
        # The namedtuple base stores only (source, relation, target);
        # *inverted* rides along as a plain instance attribute and is
        # not part of equality or hashing.
        inst = super(Triple, cls).__new__(cls, source, relation, target)
        inst.inverted = inverted
        return inst
class Graph(object):
"""
A basic class for modeling a rooted, directed acyclic graph.
A Graph is defined by a list of triples, which can be divided into
two parts: a list of graph edges where both the source and target
are node identifiers, and a list of node attributes where only the
source is a node identifier and the target is a constant. These
lists can be obtained via the Graph.triples(), Graph.edges(), and
Graph.attributes() methods.
"""
def __init__(self, data=None, top=None):
    """
    Create a Graph from an iterable of triples.

    Args:
        data: an iterable of triples (Triple objects or 3-tuples)
        top: the node identifier of the top node; if unspecified,
            the source of the first triple is used
    Example:
        >>> Graph([
        ...     ('b', 'instance', 'bark'),
        ...     ('d', 'instance', 'dog'),
        ...     ('b', 'ARG1', 'd')
        ... ])
    """
    self._triples = []
    self._top = None
    if data is None:
        data = []
    else:
        data = list(data)  # make list (e.g., if it's a generator)
    if data:
        # Normalize every element to a Triple, carrying over any
        # 'inverted' flag already attached (plain tuples get None).
        self._triples.extend(
            Triple(*t, inverted=getattr(t, 'inverted', None))
            for t in data
        )
        # implicit top: source of first triple
        if top is None:
            top = data[0][0]
        # Assigned via the property, which validates that *top*
        # names an actual node; an empty graph keeps _top as None.
        self.top = top
def __repr__(self):
return '<{} object (top={}) at {}>'.format(
self.__class__.__name__,
self.top,
id(self)
)
def __str__(self):
    """Serialize the graph to PENMAN notation with default settings."""
    return PENMANCodec().encode(self)  # just use the default encoder
@property
def top(self):
    """
    The top variable.
    """
    return self._top

@top.setter
def top(self, top):
    # Reject identifiers that don't name a node in the graph.
    if top not in self.variables():
        raise ValueError('top must be a valid node')
    self._top = top  # check if top is valid variable?
def variables(self):
"""
Return the list of variables (nonterminal node identifiers).
"""
return set(v for v, _, _ in self._triples)
def triples(self, source=None, relation=None, target=None):
"""
Return triples filtered by their *source*, *relation*, or *target*.
"""
triplematch = lambda t: (
(source is None or source == t.source) and
(relation is None or relation == t.relation) and
(target is None or target == t.target)
)
return list(filter(triplematch, self._triples))
def edges(self, source=None, relation=None, target=None):
"""
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
"""
edgematch = lambda e: (
(source is None or source == e.source) and
(relation is None or relation == e.relation) and
(target is None or target == e.target)
)
variables = self.variables()
edges = [t for t in self._triples if t.target in variables]
return list(filter(edgematch, edges))
def attributes(self, source=None, relation=None, target=None):
"""
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
"""
attrmatch = lambda a: (
(source is None or source == a.source) and
(relation is None or relation == a.relation) and
(target is None or target == a.target)
)
variables = self.variables()
attrs = [t for t in self.triples() if t.target not in variables]
return list(filter(attrmatch, attrs))
def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
def _regex(x, s, pos, msg):
m = x.match(s, pos=pos)
if m is None:
raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
return m
def _default_cast(x):
    """
    Cast string *x* to a float or int when it looks numeric.

    Quoted strings (those starting with ``"``) and non-numeric symbols
    are returned unchanged; non-string values pass through untouched.
    """
    # basestring is aliased to str on Python 3 (see the shim at module top)
    if isinstance(x, basestring):
        if x.startswith('"'):
            x = x  # strip quotes?  (currently a no-op: quotes are kept)
        elif re.match(
                r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
            x = float(x)
        elif re.match(r'-?\d+$', x):
            x = int(x)
    return x
class PenmanError(Exception):
    """Base class for errors in the Penman package."""


class EncodeError(PenmanError):
    """Raised when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
    """Raised when decoding PENMAN-notation fails."""

    def __init__(self, *args, **kwargs):
        # Python2 doesn't allow parameters like:
        #   (*args, key=val, **kwargs)
        # so do this manually.
        string = pos = None
        if 'string' in kwargs:
            string = kwargs['string']
            del kwargs['string']
        if 'pos' in kwargs:
            pos = kwargs['pos']
            del kwargs['pos']
        super(DecodeError, self).__init__(*args, **kwargs)
        self.string = string  # the string being decoded, if provided
        self.pos = pos  # int offset or slice locating the error in *string*

    def __str__(self):
        # Append the error location; pos may be a slice (span) or an int.
        if isinstance(self.pos, slice):
            loc = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
        else:
            loc = ' at position {}'.format(self.pos)
        return Exception.__str__(self) + loc
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object

    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    # Instantiate the codec and hand the string straight to it.
    return cls(**kwargs).decode(s)
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    # Instantiate the codec and delegate serialization to it.
    return cls(**kwargs).encode(g, top=top)
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # BUG FIX: *triples* was accepted but never forwarded, so triple-
    # conjunction input was always parsed as PENMAN; forward it like
    # loads() does.
    if hasattr(source, 'read'):
        return list(decode(source.read(), triples=triples))
    else:
        with open(source) as fh:
            return list(decode(fh.read(), triples=triples))
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    text = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if not hasattr(file, 'write'):
        # *file* is a path; open it ourselves and write the text.
        with open(file, 'w') as fh:
            print(text, file=fh)
    else:
        print(text, file=file)
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        triples: if True, write graphs as triples instead of as PENMAN
    Returns:
        the string of serialized graphs
    """
    codec = cls(**kwargs)
    # Serialized graphs are separated by a single blank line.
    return '\n\n'.join(codec.encode(g, triples=triples) for g in graphs)
def _main():
    # Command-line driver: parse docopt arguments, choose a codec, then
    # read, convert, and re-emit the graphs.
    import sys
    from docopt import docopt  # third-party; only needed for CLI use

    args = docopt(USAGE, version='Penman {}'.format(__version__))

    infile = args['--input'] or sys.stdin
    outfile = args['--output'] or sys.stdout
    codec = AMRCodec if args['--amr'] else PENMANCodec
    indent = True  # default: adaptive indentation
    if args['--indent']:
        if args['--indent'].lower() in ("no", "none", "false"):
            indent = False
        else:
            try:
                indent = int(args['--indent'])
                if indent < 0:
                    raise ValueError  # negative indents are rejected below
            except ValueError:
                # NOTE(review): the two concatenated literals yield a
                # doubled space ("a  positive"); message left as-is here.
                sys.exit('error: --indent value must be "no" or a '
                         ' positive integer')
    data = load(infile, cls=codec)
    dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)


if __name__ == '__main__':
    _main()
|
goodmami/penman
|
penman.py
|
dump
|
python
|
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
text = dumps(graphs, triples=triples, cls=cls, **kwargs)
if hasattr(file, 'write'):
print(text, file=file)
else:
with open(file, 'w') as fh:
print(text, file=fh)
|
Serialize each graph in *graphs* to PENMAN and write to *file*.
Args:
graphs: an iterable of Graph objects
file: a filename or file-like object to write to
triples: if True, write graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L869-L886
|
[
"def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):\n \"\"\"\n Serialize each graph in *graphs* to the PENMAN format.\n\n Args:\n graphs: an iterable of Graph objects\n triples: if True, write graphs as triples instead of as PENMAN\n Returns:\n the string of serialized graphs\n \"\"\"\n codec = cls(**kwargs)\n strings = [codec.encode(g, triples=triples) for g in graphs]\n return '\\n\\n'.join(strings)\n"
] |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
    """Return *triples* unchanged, preserving their original order."""
    # Identity sort: hand back the very same list object.
    return triples
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    The sort is stable, so the relative order within each orientation
    group is preserved.
    """
    # Normalize with bool() so an unset orientation (inverted=None, as
    # Graph.__init__ assigns for plain 3-tuples) sorts with the outward
    # edges instead of raising TypeError when None is compared to a bool
    # under Python 3.
    return sorted(triples, key=lambda t: bool(t.inverted))
def alphanum_order(triples):
    """
    Sort a list of triples by relation name.

    Embedded integers are sorted numerically, but otherwise the sorting
    is alphabetic.
    """
    def sort_key(triple):
        # Split the relation into alternating text/number chunks and
        # compare the numeric chunks as ints (natural-sort order).
        chunks = re.split(r'([0-9]+)', triple.relation or '')
        return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]

    return sorted(triples, key=sort_key)
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
class AMRCodec(PENMANCodec):
    """
    An AMR codec for graphs in PENMAN notation.

    Tightens the generic codec for AMR conventions: variables must look
    like [a-z]+\\d*, a node's first relation must be its type, anonymous
    relations are disallowed, and a handful of relations (de)invert
    irregularly (e.g. domain <-> mod).
    """

    TYPE_REL = 'instance'
    TOP_VAR = None
    TOP_REL = 'top'
    # vars: [a-z]+\d* ; first relation must be node type
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
    NODETYPE_RE = PENMANCodec.ATOM_RE
    VAR_RE = re.compile(r'([a-z]+\d*)')
    # only non-anonymous relations
    RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')

    # Relations whose inverse is irregular; a None value means the
    # relation may not be inverted at all.
    _inversions = {
        TYPE_REL: None,  # don't allow inverted types
        'domain': 'mod',
        'consist-of': 'consist-of-of',
        'prep-on-behalf-of': 'prep-on-behalf-of-of',
        'prep-out-of': 'prep-out-of-of',
    }
    # Relations that de-invert irregularly (mod is the inverse of domain).
    _deinversions = {
        'mod': 'domain',
    }

    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        # Either an irregular inverse (mod), or a regular "-of" suffix
        # that is not itself the base form of an irregular pair.
        return (
            relation in self._deinversions or
            (relation.endswith('-of') and relation not in self._inversions)
        )

    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.

        Raises:
            PenmanError: if the relation may not be (de)inverted
                (e.g. the node-type relation).
        """
        if self.is_relation_inverted(relation):
            rel = self._deinversions.get(relation, relation[:-3])
        else:
            rel = self._inversions.get(relation, relation + '-of')
        if rel is None:
            raise PenmanError(
                'Cannot (de)invert {}; not allowed'.format(relation)
            )
        return rel
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
    """Container for Graph edges and node attributes."""

    def __new__(cls, source, relation, target, inverted=None):
        # Build the immutable 3-tuple, then attach *inverted* as a plain
        # attribute; it records the original orientation and does not
        # take part in tuple equality or hashing.
        t = super(Triple, cls).__new__(
            cls, source, relation, target
        )
        t.inverted = inverted
        return t
class Graph(object):
    """
    A basic class for modeling a rooted, directed acyclic graph.

    A Graph is defined by a list of triples, which can be divided into
    two parts: a list of graph edges where both the source and target
    are node identifiers, and a list of node attributes where only the
    source is a node identifier and the target is a constant. These
    lists can be obtained via the Graph.triples(), Graph.edges(), and
    Graph.attributes() methods.
    """

    def __init__(self, data=None, top=None):
        """
        Create a Graph from an iterable of triples.

        Args:
            data: an iterable of triples (Triple objects or 3-tuples)
            top: the node identifier of the top node; if unspecified,
                the source of the first triple is used
        Example:
            >>> Graph([
            ...     ('b', 'instance', 'bark'),
            ...     ('d', 'instance', 'dog'),
            ...     ('b', 'ARG1', 'd')
            ... ])
        """
        self._triples = []
        self._top = None
        if data is None:
            data = []
        else:
            data = list(data)  # make list (e.g., if its a generator)
        if data:
            # Normalize everything to Triple objects, carrying over any
            # existing orientation flag (plain tuples get inverted=None).
            self._triples.extend(
                Triple(*t, inverted=getattr(t, 'inverted', None))
                for t in data
            )
            # implicit top: source of first triple
            if top is None:
                top = data[0][0]
            self.top = top

    def __repr__(self):
        return '<{} object (top={}) at {}>'.format(
            self.__class__.__name__,
            self.top,
            id(self)
        )

    def __str__(self):
        return PENMANCodec().encode(self)  # just use the default encoder

    @property
    def top(self):
        """
        The top variable.
        """
        return self._top

    @top.setter
    def top(self, top):
        # Reject identifiers that are not sources of any triple.
        if top not in self.variables():
            raise ValueError('top must be a valid node')
        self._top = top  # check if top is valid variable?

    def variables(self):
        """
        Return the list of variables (nonterminal node identifiers).
        """
        return set(v for v, _, _ in self._triples)

    def triples(self, source=None, relation=None, target=None):
        """
        Return triples filtered by their *source*, *relation*, or *target*.
        """
        # A None filter value matches anything for that position.
        triplematch = lambda t: (
            (source is None or source == t.source) and
            (relation is None or relation == t.relation) and
            (target is None or target == t.target)
        )
        return list(filter(triplematch, self._triples))

    def edges(self, source=None, relation=None, target=None):
        """
        Return edges filtered by their *source*, *relation*, or *target*.

        Edges don't include terminal triples (node types or attributes).
        """
        edgematch = lambda e: (
            (source is None or source == e.source) and
            (relation is None or relation == e.relation) and
            (target is None or target == e.target)
        )
        # An edge is any triple whose target is itself a variable.
        variables = self.variables()
        edges = [t for t in self._triples if t.target in variables]
        return list(filter(edgematch, edges))

    def attributes(self, source=None, relation=None, target=None):
        """
        Return attributes filtered by their *source*, *relation*, or *target*.

        Attributes don't include triples where the target is a nonterminal.
        """
        attrmatch = lambda a: (
            (source is None or source == a.source) and
            (relation is None or relation == a.relation) and
            (target is None or target == a.target)
        )
        # An attribute is any triple whose target is a constant.
        variables = self.variables()
        attrs = [t for t in self.triples() if t.target not in variables]
        return list(filter(attrmatch, attrs))

    def reentrancies(self):
        """
        Return a mapping of variables to their re-entrancy count.

        A re-entrancy is when more than one edge selects a node as its
        target. These graphs are rooted, so the top node always has an
        implicit entrancy. Only nodes with re-entrancies are reported,
        and the count is only for the entrant edges beyond the first.
        Also note that these counts are for the interpreted graph, not
        for the linearized form, so inverted edges are always
        re-entrant.
        """
        entrancies = defaultdict(int)
        entrancies[self.top] += 1  # implicit entrancy to top
        for t in self.edges():
            entrancies[t.target] += 1
        return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
def _regex(x, s, pos, msg):
m = x.match(s, pos=pos)
if m is None:
raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
return m
def _default_cast(x):
if isinstance(x, basestring):
if x.startswith('"'):
x = x # strip quotes?
elif re.match(
r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
x = float(x)
elif re.match(r'-?\d+$', x):
x = int(x)
return x
class PenmanError(Exception):
"""Base class for errors in the Penman package."""
class EncodeError(PenmanError):
"""Raises when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
"""Raised when decoding PENMAN-notation fails."""
def __init__(self, *args, **kwargs):
# Python2 doesn't allow parameters like:
# (*args, key=val, **kwargs)
# so do this manaully.
string = pos = None
if 'string' in kwargs:
string = kwargs['string']
del kwargs['string']
if 'pos' in kwargs:
pos = kwargs['pos']
del kwargs['pos']
super(DecodeError, self).__init__(*args, **kwargs)
self.string = string
self.pos = pos
def __str__(self):
if isinstance(self.pos, slice):
loc = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
else:
loc = ' at position {}'.format(self.pos)
return Exception.__str__(self) + loc
def decode(s, cls=PENMANCodec, **kwargs):
"""
Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the Graph object described by *s*
Example:
>>> decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
"""
codec = cls(**kwargs)
return codec.decode(s)
def encode(g, top=None, cls=PENMANCodec, **kwargs):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized graph; if
unset, the original top of *g* is used
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
"""
codec = cls(**kwargs)
return codec.encode(g, top=top)
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # BUG FIX: *triples* was accepted but never forwarded, so triple-
    # conjunction input was always parsed as PENMAN; forward it like
    # loads() does.
    if hasattr(source, 'read'):
        return list(decode(source.read(), triples=triples))
    else:
        with open(source) as fh:
            return list(decode(fh.read(), triples=triples))
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    # Build the codec and materialize every graph the iterator yields.
    return list(cls(**kwargs).iterdecode(string, triples=triples))
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
"""
Serialize each graph in *graphs* to the PENMAN format.
Args:
graphs: an iterable of Graph objects
triples: if True, write graphs as triples instead of as PENMAN
Returns:
the string of serialized graphs
"""
codec = cls(**kwargs)
strings = [codec.encode(g, triples=triples) for g in graphs]
return '\n\n'.join(strings)
def _main():
import sys
from docopt import docopt
args = docopt(USAGE, version='Penman {}'.format(__version__))
infile = args['--input'] or sys.stdin
outfile = args['--output'] or sys.stdout
codec = AMRCodec if args['--amr'] else PENMANCodec
indent = True
if args['--indent']:
if args['--indent'].lower() in ("no", "none", "false"):
indent = False
else:
try:
indent = int(args['--indent'])
if indent < 0:
raise ValueError
except ValueError:
sys.exit('error: --indent value must be "no" or a '
' positive integer')
data = load(infile, cls=codec)
dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)
if __name__ == '__main__':
_main()
|
goodmami/penman
|
penman.py
|
dumps
|
python
|
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs):
codec = cls(**kwargs)
strings = [codec.encode(g, triples=triples) for g in graphs]
return '\n\n'.join(strings)
|
Serialize each graph in *graphs* to the PENMAN format.
Args:
graphs: an iterable of Graph objects
triples: if True, write graphs as triples instead of as PENMAN
Returns:
the string of serialized graphs
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L889-L901
| null |
#!/usr/bin/env python3
"""
PENMAN graph library for AMR, DMRS, etc.
Penman is a module to assist in working with graphs encoded in PENMAN
notation, such as those for Abstract Meaning Representation (AMR) or
Dependency Minimal Recursion Semantics (DMRS). It allows for conversion
between PENMAN and triples, inspection of the graphs, and
reserialization (e.g. for selecting a new top node). Some features,
such as conversion or reserialization, can be done by calling the
module as a script.
"""
from __future__ import print_function
USAGE = '''
Penman
An API and utility for working with graphs in PENMAN notation.
Usage: penman.py [-h|--help] [-V|--version] [options]
Options:
-h, --help display this help and exit
-V, --version display the version and exit
-i FILE, --input FILE read graphs from FILE instead of stdin
-o FILE, --output FILE write output to FILE instead of stdout
-t, --triples print graphs as triple conjunctions
--indent N indent N spaces per level ("no" for no newlines)
--amr use AMR codec instead of generic PENMAN one
'''
# API overview:
#
# Classes:
# * PENMANCodec(indent=True, relation_sort=original_order)
# - PENMANCodec.decode(s)
# - PENMANCodec.iterdecode(s)
# - PENMANCodec.encode(g, top=None)
# - PENMANCodec.is_relation_inverted(relation)
# - PENMANCodec.invert_relation(relation)
# - PENMANCodec.handle_triple(source, relation, target)
# - PENMANCodec.triples_to_graph(triples, top=None)
# * AMRCodec(indent=True, relation_sort=original_order)
# - (methods are the same as PENMANCodec)
# * Triple(source, relation, target)
# * Graph(data=None, top=None)
# - Graph.top
# - Graph.variables()
# - Graph.triples(source=None, relation=None, target=None)
# - Graph.edges(source=None, relation=None, target=None)
# - Graph.attributes(source=None, relation=None, target=None)
# - Graph.reentrancies()
#
# Module Functions:
# * decode(s, cls=PENMANCodec, **kwargs)
# * encode(g, cls=PENMANCodec, **kwargs)
# * load(source, triples=False, cls=PENMANCodec, **kwargs)
# * loads(string, triples=False, cls=PENMANCodec, **kwargs)
# * dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
# * dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
# * original_order(triples)
# * out_first_order(triples)
# * alphanum_order(triples)
import re
from collections import namedtuple, defaultdict
try:
basestring
except NameError:
basestring = str
__version__ = '0.6.2'
__version_info__ = [
int(x) if x.isdigit() else x
for x in re.findall(r'[0-9]+|[^0-9\.-]+', __version__)
]
def original_order(triples):
"""
Return a list of triples in the original order.
"""
return triples
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    The sort is stable, so the relative order within each orientation
    group is preserved.
    """
    # Normalize with bool() so an unset orientation (inverted=None, as
    # Graph.__init__ assigns for plain 3-tuples) sorts with the outward
    # edges instead of raising TypeError when None is compared to a bool
    # under Python 3.
    return sorted(triples, key=lambda t: bool(t.inverted))
def alphanum_order(triples):
"""
Sort a list of triples by relation name.
Embedded integers are sorted numerically, but otherwise the sorting
is alphabetic.
"""
return sorted(
triples,
key=lambda t: [
int(t) if t.isdigit() else t
for t in re.split(r'([0-9]+)', t.relation or '')
]
)
class PENMANCodec(object):
    """
    A parameterized encoder/decoder for graphs in PENMAN notation.
    """
    # Relation name used for node-type ("instance") triples.
    TYPE_REL = 'instance'
    # Variable/relation pair marking the graph's top; TOP_VAR is None
    # here, so the top is inferred rather than read from a TOP triple.
    TOP_VAR = None
    TOP_REL = 'top'
    # Lexer patterns shared by the PENMAN and triple-conjunction parsers.
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
    NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
    RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
    INT_RE = re.compile(r'[+-]?\d+')
    FLOAT_RE = re.compile(
        r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
    )
    ATOM_RE = re.compile(r'([^\s()\/,]+)')
    STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
    VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
    NODETYPE_RE = VAR_RE  # default; allow strings, numbers, and symbols
    COMMA_RE = re.compile(r'\s*,\s*')
    SPACING_RE = re.compile(r'\s*')
    def __init__(self, indent=True, relation_sort=original_order):
        """
        Initialize a new codec.
        Args:
            indent: if True, adaptively indent; if False or None, don't
                indent; if a non-negative integer, indent that many
                spaces per nesting level
            relation_sort: when encoding, sort the relations on each
                node according to this function; by default, the
                original order is maintained
        """
        self.indent = indent
        self.relation_sort = relation_sort
    def decode(self, s, triples=False):
        """
        Deserialize PENMAN-notation string *s* into its Graph object.
        Args:
            s: a string containing a single PENMAN-serialized graph
            triples: if True, treat *s* as a conjunction of logical triples
        Returns:
            the Graph object described by *s*
        Example:
            >>> codec = PENMANCodec()
            >>> codec.decode('(b / bark :ARG1 (d / dog))')
            <Graph object (top=b) at ...>
            >>> codec.decode(
            ...     'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
            ...     triples=True
            ... )
            <Graph object (top=b) at ...>
        """
        try:
            if triples:
                span, data = self._decode_triple_conjunction(s)
            else:
                span, data = self._decode_penman_node(s)
        except IndexError:
            # The parsers index into *s* directly, so running off the
            # end of the input surfaces as an IndexError.
            raise DecodeError(
                'Unexpected end of string.', string=s, pos=len(s)
            )
        top, nodes, edges = data
        return self.triples_to_graph(nodes + edges, top=top)
    def iterdecode(self, s, triples=False):
        """
        Deserialize PENMAN-notation string *s* into its Graph objects.
        Args:
            s: a string containing zero or more PENMAN-serialized graphs
            triples: if True, treat *s* as a conjunction of logical triples
        Yields:
            valid Graph objects described by *s*
        Example:
            >>> codec = PENMANCodec()
            >>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
            [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
            >>> list(codec.iterdecode(
            ...     'instance(h, hello)\n'
            ...     'instance(g, goodbye)'
            ... ))
            [<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
        """
        pos, strlen = 0, len(s)
        while pos < strlen:
            if s[pos] == '#':
                # Skip comment lines (to end of line).
                while pos < strlen and s[pos] != '\n':
                    pos += 1
            elif triples or s[pos] == '(':
                try:
                    if triples:
                        span, data = self._decode_triple_conjunction(
                            s, pos=pos
                        )
                    else:
                        span, data = self._decode_penman_node(s, pos=pos)
                except (IndexError, DecodeError):
                    # don't re-raise below for more robust parsing, but
                    # for now, raising helps with debugging bad input
                    raise
                    # NOTE(review): unreachable while the raise above
                    # stands; it would resume scanning on bad input.
                    pos += 1
                else:
                    top, nodes, edges = data
                    yield self.triples_to_graph(nodes + edges, top=top)
                    # Continue scanning after the parsed span.
                    pos = span[1]
            else:
                pos += 1
    def encode(self, g, top=None, triples=False):
        """
        Serialize the graph *g* from *top* to PENMAN notation.
        Args:
            g: the Graph object
            top: the node identifier for the top of the serialized
                graph; if unset, the original top of *g* is used
            triples: if True, serialize as a conjunction of logical triples
        Returns:
            the PENMAN-serialized string of the Graph *g*
        Example:
            >>> codec = PENMANCodec()
            >>> codec.encode(Graph([('h', 'instance', 'hi')]))
            (h / hi)
            >>> codec.encode(Graph([('h', 'instance', 'hi')]),
            ...              triples=True)
            instance(h, hi)
        """
        if len(g.triples()) == 0:
            raise EncodeError('Cannot encode empty graph.')
        if triples:
            return self._encode_triple_conjunction(g, top=top)
        else:
            return self._encode_penman(g, top=top)
    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        # By default any relation ending in '-of' is an inversion.
        return relation and relation.endswith('-of')
    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.
        """
        if self.is_relation_inverted(relation):
            # Strip the '-of' suffix; an empty result becomes None.
            return relation[:-3] or None
        else:
            return (relation or '') + '-of'
    def handle_triple(self, lhs, relation, rhs):
        """
        Process triples before they are added to the graph.
        Note that *lhs* and *rhs* are as they originally appeared, and
        may be inverted. Inversions are detected by
        is_relation_inverted() and de-inverted by invert_relation().
        By default, this function:
            * removes initial colons on relations
            * de-inverts all inverted relations
            * sets empty relations to `None`
            * casts numeric string sources and targets to their numeric
              types (e.g. float, int)
        Args:
            lhs: the left hand side of an observed triple
            relation: the triple relation (possibly inverted)
            rhs: the right hand side of an observed triple
        Returns:
            The processed (source, relation, target) triple. By default,
            it is returned as a Triple object.
        """
        relation = relation.replace(':', '', 1)  # remove leading :
        if self.is_relation_inverted(relation):  # deinvert
            source, target, inverted = rhs, lhs, True
            relation = self.invert_relation(relation)
        else:
            source, target, inverted = lhs, rhs, False
        # Cast numeric-looking strings to int/float.
        source = _default_cast(source)
        target = _default_cast(target)
        if relation == '':  # set empty relations to None
            relation = None
        return Triple(source, relation, target, inverted)
    def triples_to_graph(self, triples, top=None):
        """
        Create a Graph from *triples* considering codec configuration.
        The Graph class does not know about information in the codec,
        so if Graph instantiation depends on special `TYPE_REL` or
        `TOP_VAR` values, use this function instead of instantiating
        a Graph object directly. This is also where edge
        normalization (de-inversion) and value type conversion occur
        (via handle_triple()).
        Args:
            triples: an iterable of (lhs, relation, rhs) triples
            top: node identifier of the top node
        Returns:
            a Graph object
        """
        # Fall back to the source of the first triple as the top.
        inferred_top = triples[0][0] if triples else None
        ts = []
        for triple in triples:
            if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
                # An explicit TOP triple overrides the inferred top and
                # is not stored as a graph triple itself.
                inferred_top = triple[2]
            else:
                ts.append(self.handle_triple(*triple))
        # Normalize the explicit *top* value the same way other triple
        # targets are normalized (e.g. numeric casting).
        top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
        return Graph(ts, top=top or inferred_top)
    def _decode_triple_conjunction(self, s, pos=0):
        """
        Parse a caret-joined conjunction of relation(var, target) triples.

        Returns ((start, end), (top, nodes, edges)), where *nodes* holds
        the TYPE_REL (instance) triples and *edges* everything else.
        """
        top, nodes, edges = None, [], []
        start = None
        while True:
            m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
            if start is None:
                # Remember where the first triple began for the span.
                start = m.start(1)
            pos, rel = m.end(0), m.group(1)
            m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
            pos = m.end(0)
            m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
            pos, var = m.end(0), m.group(1).strip()
            m = _regex(self.COMMA_RE, s, pos, '","')
            pos = m.end(0)
            # The second argument: a node type, quoted string, or atom.
            if rel == self.TYPE_REL:
                m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
            else:
                if s[pos] == '"':
                    m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                else:
                    m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
            pos, tgt = m.end(0), m.group(1)
            # Route the parsed triple to top/nodes/edges.
            if var == self.TOP_VAR and rel == self.TOP_REL:
                top = tgt
            elif rel == self.TYPE_REL:
                nodes.append((var, rel, tgt))
            else:
                edges.append((var, rel, tgt))
            m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
            pos = m.end(1)
            # A caret joins conjoined triples; otherwise we are done.
            if m.end(0) < len(s) and s[m.end(0)] == '^':
                pos = m.end(0) + 1
            else:
                break
        if top is None and nodes:
            # No explicit TOP triple: use the first typed node.
            top = nodes[0][0]
        return (start, pos), (top, nodes, edges)
    def _decode_penman_node(self, s, pos=0):
        """
        Recursively parse one parenthesized PENMAN node starting at *pos*.

        Returns ((start, end), (var, nodes, edges)); the node's own type
        triple is prepended to *nodes*.
        """
        nodes, edges = [], []
        strlen = len(s)
        m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
        start, pos = m.start(1), m.end(0)
        m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
        pos, var = m.end(0), m.group(1).strip()
        nodetype = None
        while pos < strlen and s[pos] != ')':
            # node type
            if s[pos] == '/':
                pos = self.SPACING_RE.match(s, pos=pos+1).end()
                m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
                pos, nodetype = m.end(0), m.group(1)
            # relation
            elif s[pos] == ':':
                m = _regex(self.RELATION_RE, s, pos, 'a relation')
                pos, rel = m.end(0), m.group(1)
                # node value
                if s[pos] == '(':
                    # Recurse into the embedded node and splice in its
                    # triples; the edge points at the sub-node's variable.
                    span, data = self._decode_penman_node(s, pos=pos)
                    pos = span[1]
                    subtop, subnodes, subedges = data
                    nodes.extend(subnodes)
                    edges.append((var, rel, subtop))
                    edges.extend(subedges)
                # string or other atom value
                else:
                    if s[pos] == '"':
                        m = _regex(self.STRING_RE, s, pos, 'a quoted string')
                        pos, value = m.end(0), m.group(1)
                    else:
                        m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
                        pos, value = m.end(0), m.group(1)
                    edges.append((var, rel, value))
            elif s[pos].isspace():
                pos += 1
            # error
            else:
                raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
        m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
        pos = m.end(1)
        # The node's own type triple always leads its triple list.
        nodes = [(var, self.TYPE_REL, nodetype)] + nodes
        return (start, pos), (var, nodes, edges)
    def _encode_penman(self, g, top=None):
        """
        Walk graph g and find a spanning dag, then serialize the result.
        First, depth-first traversal of preferred orientations (whether
        true or inverted) to create graph p.
        If any triples remain, select the first remaining triple whose
        source in the dispreferred orientation exists in p, where
        'first' is determined by the order of inserted nodes (i.e. a
        topological sort). Add this triple, then repeat the depth-first
        traversal of preferred orientations from its target. Repeat
        until no triples remain, or raise an error if there are no
        candidates in the dispreferred orientation (which likely means
        the graph is disconnected).
        """
        if top is None:
            top = g.top
        remaining = set(g.triples())
        variables = g.variables()
        store = defaultdict(lambda: ([], []))  # (preferred, dispreferred)
        for t in g.triples():
            if t.inverted:
                store[t.target][0].append(t)
                store[t.source][1].append(Triple(*t, inverted=False))
            else:
                store[t.source][0].append(t)
                store[t.target][1].append(Triple(*t, inverted=True))
        p = defaultdict(list)
        topolist = [top]
        def _update(t):
            # Commit triple *t* to the spanning dag *p* and return its
            # target if traversal should continue from there.
            src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
            p[src].append(t)
            remaining.remove(t)
            if tgt in variables and t.relation != self.TYPE_REL:
                topolist.append(tgt)
                return tgt
            return None
        def _explore_preferred(src):
            # Depth-first traversal over preferred-orientation triples.
            ts = store.get(src, ([], []))[0]
            for t in ts:
                if t in remaining:
                    tgt = _update(t)
                    if tgt is not None:
                        _explore_preferred(tgt)
            ts[:] = []  # clear explored list
        _explore_preferred(top)
        while remaining:
            # Pick the first dispreferred-orientation candidate whose
            # source is already in the dag (topological order).
            flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
            for fc in flip_candidates:
                fc[:] = [c for c in fc if c in remaining]  # clear superfluous
            if not any(len(fc) > 0 for fc in flip_candidates):
                raise EncodeError('Invalid graph; possibly disconnected.')
            c = next(c for fc in flip_candidates for c in fc)
            tgt = _update(c)
            if tgt is not None:
                _explore_preferred(tgt)
        return self._layout(p, top, 0, set())
    def _layout(self, g, src, offset, seen):
        """
        Recursively render the spanning dag *g* from node *src* as text.

        *offset* tracks the current indentation column and *seen* guards
        against revisiting nodes: re-entrant references are printed as
        bare variables.
        """
        indent = self.indent
        if src not in g or len(g.get(src, [])) == 0 or src in seen:
            return src
        seen.add(src)
        branches = []
        outedges = self.relation_sort(g[src])
        head = '({}'.format(src)
        if indent is True:
            offset += len(head) + 1  # + 1 for space after src (added later)
        elif indent is not None and indent is not False:
            offset += indent
        for t in outedges:
            if t.relation == self.TYPE_REL:
                if t.target is not None:
                    # node types always come first
                    branches = ['/ {}'.format(t.target)] + branches
            else:
                if t.inverted:
                    tgt = t.source
                    rel = self.invert_relation(t.relation)
                else:
                    tgt = t.target
                    rel = t.relation or ''
                # Indent children past ':rel ' when adaptively indenting.
                inner_offset = (len(rel) + 2) if indent is True else 0
                branch = self._layout(g, tgt, offset + inner_offset, seen)
                branches.append(':{} {}'.format(rel, branch))
        if branches:
            head += ' '
        delim = ' ' if (indent is None or indent is False) else '\n'
        tail = (delim + (' ' * offset)).join(branches) + ')'
        return head + tail
    def _encode_triple_conjunction(self, g, top=None):
        """
        Render the graph as a ^-joined conjunction of logical triples.
        """
        if top is None:
            top = g.top
        # Emit an explicit TOP triple only if the codec declares TOP_VAR.
        if self.TOP_VAR is not None and top is not None:
            top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
        else:
            top_triple = []
        return ' ^\n'.join(
            map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
        )
class AMRCodec(PENMANCodec):
    """
    An AMR codec for graphs in PENMAN notation.
    """
    TYPE_REL = 'instance'
    TOP_VAR = None
    TOP_REL = 'top'
    # vars: [a-z]+\d* ; first relation must be node type
    NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
    NODETYPE_RE = PENMANCodec.ATOM_RE
    VAR_RE = re.compile(r'([a-z]+\d*)')
    # only non-anonymous relations
    RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
    # Relations with irregular inverses; a None value means the
    # relation may not be inverted at all.
    _inversions = {
        TYPE_REL: None,  # don't allow inverted types
        'domain': 'mod',
        'consist-of': 'consist-of-of',
        'prep-on-behalf-of': 'prep-on-behalf-of-of',
        'prep-out-of': 'prep-out-of-of',
    }
    # Relations with irregular de-inverses.
    _deinversions = {
        'mod': 'domain',
    }
    def is_relation_inverted(self, relation):
        """
        Return True if *relation* is inverted.
        """
        if relation in self._deinversions:
            return True
        # An '-of' suffix marks an inversion unless the relation is a
        # known base form (e.g. 'consist-of').
        return relation.endswith('-of') and relation not in self._inversions
    def invert_relation(self, relation):
        """
        Invert or deinvert *relation*.
        """
        if self.is_relation_inverted(relation):
            inverse = self._deinversions.get(relation, relation[:-3])
        else:
            inverse = self._inversions.get(relation, relation + '-of')
        if inverse is None:
            raise PenmanError(
                'Cannot (de)invert {}; not allowed'.format(relation)
            )
        return inverse
class Triple(namedtuple('Triple', ('source', 'relation', 'target'))):
    """Container for Graph edges and node attributes."""
    def __new__(cls, source, relation, target, inverted=None):
        # Only (source, relation, target) participate in tuple equality
        # and hashing; the orientation flag rides along as a plain
        # attribute on the instance.
        triple = super(Triple, cls).__new__(cls, source, relation, target)
        triple.inverted = inverted
        return triple
class Graph(object):
    """
    A basic class for modeling a rooted, directed acyclic graph.
    A Graph is defined by a list of triples, which can be divided into
    two parts: a list of graph edges where both the source and target
    are node identifiers, and a list of node attributes where only the
    source is a node identifier and the target is a constant. These
    lists can be obtained via the Graph.triples(), Graph.edges(), and
    Graph.attributes() methods.
    """
    def __init__(self, data=None, top=None):
        """
        Create a Graph from an iterable of triples.
        Args:
            data: an iterable of triples (Triple objects or 3-tuples)
            top: the node identifier of the top node; if unspecified,
                the source of the first triple is used
        Example:
            >>> Graph([
            ...     ('b', 'instance', 'bark'),
            ...     ('d', 'instance', 'dog'),
            ...     ('b', 'ARG1', 'd')
            ... ])
        """
        self._triples = []
        self._top = None
        # Materialize *data* in case it is a one-shot generator.
        items = [] if data is None else list(data)
        if items:
            self._triples.extend(
                Triple(*item, inverted=getattr(item, 'inverted', None))
                for item in items
            )
            if top is None:
                # Implicit top: the source of the first triple.
                top = items[0][0]
        self.top = top
    def __repr__(self):
        return '<{} object (top={}) at {}>'.format(
            type(self).__name__, self.top, id(self)
        )
    def __str__(self):
        # Delegate serialization to the default codec.
        return PENMANCodec().encode(self)
    @property
    def top(self):
        """
        The top variable.
        """
        return self._top
    @top.setter
    def top(self, top):
        if top not in self.variables():
            raise ValueError('top must be a valid node')
        self._top = top
    def variables(self):
        """
        Return the list of variables (nonterminal node identifiers).
        """
        return {t.source for t in self._triples}
    def triples(self, source=None, relation=None, target=None):
        """
        Return triples filtered by their *source*, *relation*, or *target*.
        """
        return [
            t for t in self._triples
            if (source is None or source == t.source)
            and (relation is None or relation == t.relation)
            and (target is None or target == t.target)
        ]
    def edges(self, source=None, relation=None, target=None):
        """
        Return edges filtered by their *source*, *relation*, or *target*.
        Edges don't include terminal triples (node types or attributes).
        """
        nonterminals = self.variables()
        return [
            t for t in self._triples
            if t.target in nonterminals
            and (source is None or source == t.source)
            and (relation is None or relation == t.relation)
            and (target is None or target == t.target)
        ]
    def attributes(self, source=None, relation=None, target=None):
        """
        Return attributes filtered by their *source*, *relation*, or *target*.
        Attributes don't include triples where the target is a nonterminal.
        """
        nonterminals = self.variables()
        return [
            t for t in self._triples
            if t.target not in nonterminals
            and (source is None or source == t.source)
            and (relation is None or relation == t.relation)
            and (target is None or target == t.target)
        ]
    def reentrancies(self):
        """
        Return a mapping of variables to their re-entrancy count.
        A re-entrancy is when more than one edge selects a node as its
        target. These graphs are rooted, so the top node always has an
        implicit entrancy. Only nodes with re-entrancies are reported,
        and the count is only for the entrant edges beyond the first.
        Also note that these counts are for the interpreted graph, not
        for the linearized form, so inverted edges are always
        re-entrant.
        """
        counts = defaultdict(int)
        counts[self.top] += 1  # implicit entrancy to top
        for edge in self.edges():
            counts[edge.target] += 1
        return {v: n - 1 for v, n in counts.items() if n >= 2}
def _regex(x, s, pos, msg):
m = x.match(s, pos=pos)
if m is None:
raise DecodeError('Expected {}'.format(msg), string=s, pos=pos)
return m
def _default_cast(x):
    # Cast numeric-looking strings to int/float; quoted strings and
    # non-string values pass through unchanged.
    if not isinstance(x, basestring):
        return x
    if x.startswith('"'):
        return x  # quoted constant; keep the quotes as-is
    if re.match(r'-?(0|[1-9]\d*)(\.\d+[eE][-+]?|\.|[eE][-+]?)\d+$', x):
        return float(x)
    if re.match(r'-?\d+$', x):
        return int(x)
    return x
class PenmanError(Exception):
    """Base class for all errors raised by the Penman package."""
class EncodeError(PenmanError):
    """Raised when encoding PENMAN-notation fails."""
class DecodeError(PenmanError):
    """Raised when decoding PENMAN-notation fails."""
    def __init__(self, *args, **kwargs):
        # Python 2 forbids keyword-only parameters after *args, so pull
        # the extra keywords out of **kwargs by hand.
        string = kwargs.pop('string', None)
        pos = kwargs.pop('pos', None)
        super(DecodeError, self).__init__(*args, **kwargs)
        self.string = string
        self.pos = pos
    def __str__(self):
        # A slice reports a span; anything else is a single position.
        if isinstance(self.pos, slice):
            where = ' in span {}:{}'.format(self.pos.start, self.pos.stop)
        else:
            where = ' at position {}'.format(self.pos)
        return Exception.__str__(self) + where
def decode(s, cls=PENMANCodec, **kwargs):
    """
    Deserialize PENMAN-serialized *s* into its Graph object
    Args:
        s: a string containing a single PENMAN-serialized graph
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the Graph object described by *s*
    Example:
        >>> decode('(b / bark :ARG1 (d / dog))')
        <Graph object (top=b) at ...>
    """
    # Build a codec with the given options and delegate to it.
    return cls(**kwargs).decode(s)
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """
    Serialize the graph *g* from *top* to PENMAN notation.
    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    # Build a codec with the given options and delegate to it.
    return cls(**kwargs).encode(g, top=top)
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.
    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # BUG FIX: *triples* was accepted but never forwarded to the
    # decoder, so load(f, triples=True) silently parsed the input as
    # PENMAN instead of as a triple conjunction (cf. loads(), which
    # forwards it correctly).
    if hasattr(source, 'read'):
        return list(decode(source.read(), triples=triples))
    else:
        with open(source) as fh:
            return list(decode(fh.read(), triples=triples))
def loads(string, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.
    Args:
        string: a string containing graph data
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    # iterdecode is lazy; force it so callers get a concrete list.
    return list(cls(**kwargs).iterdecode(string, triples=triples))
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs):
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.
    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        triples: if True, write graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    """
    text = dumps(graphs, triples=triples, cls=cls, **kwargs)
    if hasattr(file, 'write'):
        # Already an open file-like object; write directly.
        print(text, file=file)
        return
    with open(file, 'w') as fh:
        print(text, file=fh)
def _main():
    """Command-line entry point: read graphs, re-serialize, and write."""
    import sys
    from docopt import docopt
    args = docopt(USAGE, version='Penman {}'.format(__version__))
    # Fall back to the standard streams when no files are given.
    infile = args['--input'] or sys.stdin
    outfile = args['--output'] or sys.stdout
    codec = AMRCodec if args['--amr'] else PENMANCodec
    # --indent accepts "no"/"none"/"false" (disable) or an integer.
    indent = True
    if args['--indent']:
        if args['--indent'].lower() in ("no", "none", "false"):
            indent = False
        else:
            try:
                indent = int(args['--indent'])
                if indent < 0:
                    raise ValueError
            except ValueError:
                # BUG FIX: the two string fragments previously joined as
                # "...or a  positive integer" (double space).
                sys.exit('error: --indent value must be "no" or a '
                         'positive integer')
    data = load(infile, cls=codec)
    dump(data, outfile, triples=args['--triples'], cls=codec, indent=indent)
# Run the command-line interface when executed as a script.
if __name__ == '__main__':
    _main()
|
goodmami/penman
|
penman.py
|
PENMANCodec.decode
|
python
|
def decode(self, s, triples=False):
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
|
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L148-L178
|
[
"def triples_to_graph(self, triples, top=None):\n \"\"\"\n Create a Graph from *triples* considering codec configuration.\n\n The Graph class does not know about information in the codec,\n so if Graph instantiation depends on special `TYPE_REL` or\n `TOP_VAR` values, use this function instead of instantiating\n a Graph object directly. This is also where edge\n normalization (de-inversion) and value type conversion occur\n (via handle_triple()).\n\n Args:\n triples: an iterable of (lhs, relation, rhs) triples\n top: node identifier of the top node\n Returns:\n a Graph object\n \"\"\"\n inferred_top = triples[0][0] if triples else None\n ts = []\n for triple in triples:\n if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:\n inferred_top = triple[2]\n else:\n ts.append(self.handle_triple(*triple))\n top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target\n return Graph(ts, top=top or inferred_top)\n",
"def _decode_triple_conjunction(self, s, pos=0):\n top, nodes, edges = None, [], []\n start = None\n while True:\n m = _regex(self.ATOM_RE, s, pos, \"a relation/predicate\")\n if start is None:\n start = m.start(1)\n pos, rel = m.end(0), m.group(1)\n\n m = _regex(self.NODE_ENTER_RE, s, pos, '\"(\"')\n pos = m.end(0)\n\n m = _regex(self.VAR_RE, s, pos, \"a variable (node identifier)\")\n pos, var = m.end(0), m.group(1).strip()\n\n m = _regex(self.COMMA_RE, s, pos, '\",\"')\n pos = m.end(0)\n\n if rel == self.TYPE_REL:\n m = _regex(self.NODETYPE_RE, s, pos, 'a node type')\n else:\n if s[pos] == '\"':\n m = _regex(self.STRING_RE, s, pos, 'a quoted string')\n else:\n m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')\n pos, tgt = m.end(0), m.group(1)\n\n if var == self.TOP_VAR and rel == self.TOP_REL:\n top = tgt\n elif rel == self.TYPE_REL:\n nodes.append((var, rel, tgt))\n else:\n edges.append((var, rel, tgt))\n\n m = _regex(self.NODE_EXIT_RE, s, pos, '\")\"')\n pos = m.end(1)\n\n if m.end(0) < len(s) and s[m.end(0)] == '^':\n pos = m.end(0) + 1\n else:\n break\n if top is None and nodes:\n top = nodes[0][0]\n return (start, pos), (top, nodes, edges)\n",
"def _decode_penman_node(self, s, pos=0):\n nodes, edges = [], []\n\n strlen = len(s)\n m = _regex(self.NODE_ENTER_RE, s, pos, '\"(\"')\n start, pos = m.start(1), m.end(0)\n\n m = _regex(self.VAR_RE, s, pos, \"a variable (node identifier)\")\n pos, var = m.end(0), m.group(1).strip()\n\n nodetype = None\n while pos < strlen and s[pos] != ')':\n\n # node type\n if s[pos] == '/':\n pos = self.SPACING_RE.match(s, pos=pos+1).end()\n m = _regex(self.NODETYPE_RE, s, pos, 'a node type')\n pos, nodetype = m.end(0), m.group(1)\n\n # relation\n elif s[pos] == ':':\n m = _regex(self.RELATION_RE, s, pos, 'a relation')\n pos, rel = m.end(0), m.group(1)\n\n # node value\n if s[pos] == '(':\n span, data = self._decode_penman_node(s, pos=pos)\n pos = span[1]\n subtop, subnodes, subedges = data\n nodes.extend(subnodes)\n edges.append((var, rel, subtop))\n edges.extend(subedges)\n\n # string or other atom value\n else:\n if s[pos] == '\"':\n m = _regex(self.STRING_RE, s, pos, 'a quoted string')\n pos, value = m.end(0), m.group(1)\n else:\n m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')\n pos, value = m.end(0), m.group(1)\n edges.append((var, rel, value))\n\n elif s[pos].isspace():\n pos += 1\n\n # error\n else:\n raise DecodeError('Expected \":\" or \"/\"', string=s, pos=pos)\n\n m = _regex(self.NODE_EXIT_RE, s, pos, '\")\"')\n pos = m.end(1)\n\n nodes = [(var, self.TYPE_REL, nodetype)] + nodes\n\n return (start, pos), (var, nodes, edges)\n"
] |
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
|
goodmami/penman
|
penman.py
|
PENMANCodec.iterdecode
|
python
|
def iterdecode(self, s, triples=False):
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
|
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L180-L223
|
[
"def triples_to_graph(self, triples, top=None):\n \"\"\"\n Create a Graph from *triples* considering codec configuration.\n\n The Graph class does not know about information in the codec,\n so if Graph instantiation depends on special `TYPE_REL` or\n `TOP_VAR` values, use this function instead of instantiating\n a Graph object directly. This is also where edge\n normalization (de-inversion) and value type conversion occur\n (via handle_triple()).\n\n Args:\n triples: an iterable of (lhs, relation, rhs) triples\n top: node identifier of the top node\n Returns:\n a Graph object\n \"\"\"\n inferred_top = triples[0][0] if triples else None\n ts = []\n for triple in triples:\n if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:\n inferred_top = triple[2]\n else:\n ts.append(self.handle_triple(*triple))\n top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target\n return Graph(ts, top=top or inferred_top)\n",
"def _decode_triple_conjunction(self, s, pos=0):\n top, nodes, edges = None, [], []\n start = None\n while True:\n m = _regex(self.ATOM_RE, s, pos, \"a relation/predicate\")\n if start is None:\n start = m.start(1)\n pos, rel = m.end(0), m.group(1)\n\n m = _regex(self.NODE_ENTER_RE, s, pos, '\"(\"')\n pos = m.end(0)\n\n m = _regex(self.VAR_RE, s, pos, \"a variable (node identifier)\")\n pos, var = m.end(0), m.group(1).strip()\n\n m = _regex(self.COMMA_RE, s, pos, '\",\"')\n pos = m.end(0)\n\n if rel == self.TYPE_REL:\n m = _regex(self.NODETYPE_RE, s, pos, 'a node type')\n else:\n if s[pos] == '\"':\n m = _regex(self.STRING_RE, s, pos, 'a quoted string')\n else:\n m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')\n pos, tgt = m.end(0), m.group(1)\n\n if var == self.TOP_VAR and rel == self.TOP_REL:\n top = tgt\n elif rel == self.TYPE_REL:\n nodes.append((var, rel, tgt))\n else:\n edges.append((var, rel, tgt))\n\n m = _regex(self.NODE_EXIT_RE, s, pos, '\")\"')\n pos = m.end(1)\n\n if m.end(0) < len(s) and s[m.end(0)] == '^':\n pos = m.end(0) + 1\n else:\n break\n if top is None and nodes:\n top = nodes[0][0]\n return (start, pos), (top, nodes, edges)\n",
"def _decode_penman_node(self, s, pos=0):\n nodes, edges = [], []\n\n strlen = len(s)\n m = _regex(self.NODE_ENTER_RE, s, pos, '\"(\"')\n start, pos = m.start(1), m.end(0)\n\n m = _regex(self.VAR_RE, s, pos, \"a variable (node identifier)\")\n pos, var = m.end(0), m.group(1).strip()\n\n nodetype = None\n while pos < strlen and s[pos] != ')':\n\n # node type\n if s[pos] == '/':\n pos = self.SPACING_RE.match(s, pos=pos+1).end()\n m = _regex(self.NODETYPE_RE, s, pos, 'a node type')\n pos, nodetype = m.end(0), m.group(1)\n\n # relation\n elif s[pos] == ':':\n m = _regex(self.RELATION_RE, s, pos, 'a relation')\n pos, rel = m.end(0), m.group(1)\n\n # node value\n if s[pos] == '(':\n span, data = self._decode_penman_node(s, pos=pos)\n pos = span[1]\n subtop, subnodes, subedges = data\n nodes.extend(subnodes)\n edges.append((var, rel, subtop))\n edges.extend(subedges)\n\n # string or other atom value\n else:\n if s[pos] == '\"':\n m = _regex(self.STRING_RE, s, pos, 'a quoted string')\n pos, value = m.end(0), m.group(1)\n else:\n m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')\n pos, value = m.end(0), m.group(1)\n edges.append((var, rel, value))\n\n elif s[pos].isspace():\n pos += 1\n\n # error\n else:\n raise DecodeError('Expected \":\" or \"/\"', string=s, pos=pos)\n\n m = _regex(self.NODE_EXIT_RE, s, pos, '\")\"')\n pos = m.end(1)\n\n nodes = [(var, self.TYPE_REL, nodetype)] + nodes\n\n return (start, pos), (var, nodes, edges)\n"
] |
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
|
goodmami/penman
|
penman.py
|
PENMANCodec.encode
|
python
|
def encode(self, g, top=None, triples=False):
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
|
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L225-L250
|
[
"def _encode_penman(self, g, top=None):\n \"\"\"\n Walk graph g and find a spanning dag, then serialize the result.\n\n First, depth-first traversal of preferred orientations (whether\n true or inverted) to create graph p.\n\n If any triples remain, select the first remaining triple whose\n source in the dispreferred orientation exists in p, where\n 'first' is determined by the order of inserted nodes (i.e. a\n topological sort). Add this triple, then repeat the depth-first\n traversal of preferred orientations from its target. Repeat\n until no triples remain, or raise an error if there are no\n candidates in the dispreferred orientation (which likely means\n the graph is disconnected).\n \"\"\"\n if top is None:\n top = g.top\n remaining = set(g.triples())\n variables = g.variables()\n store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)\n for t in g.triples():\n if t.inverted:\n store[t.target][0].append(t)\n store[t.source][1].append(Triple(*t, inverted=False))\n else:\n store[t.source][0].append(t)\n store[t.target][1].append(Triple(*t, inverted=True))\n\n p = defaultdict(list)\n topolist = [top]\n\n def _update(t):\n src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])\n p[src].append(t)\n remaining.remove(t)\n if tgt in variables and t.relation != self.TYPE_REL:\n topolist.append(tgt)\n return tgt\n return None\n\n def _explore_preferred(src):\n ts = store.get(src, ([], []))[0]\n for t in ts:\n if t in remaining:\n tgt = _update(t)\n if tgt is not None:\n _explore_preferred(tgt)\n ts[:] = [] # clear explored list\n\n _explore_preferred(top)\n\n while remaining:\n flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]\n for fc in flip_candidates:\n fc[:] = [c for c in fc if c in remaining] # clear superfluous\n if not any(len(fc) > 0 for fc in flip_candidates):\n raise EncodeError('Invalid graph; possibly disconnected.')\n c = next(c for fc in flip_candidates for c in fc)\n tgt = _update(c)\n if tgt is not None:\n 
_explore_preferred(tgt)\n\n return self._layout(p, top, 0, set())\n",
"def _encode_triple_conjunction(self, g, top=None):\n if top is None:\n top = g.top\n if self.TOP_VAR is not None and top is not None:\n top_triple = [(self.TOP_VAR, self.TOP_REL, top)]\n else:\n top_triple = []\n return ' ^\\n'.join(\n map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())\n )\n",
"def triples(self, source=None, relation=None, target=None):\n \"\"\"\n Return triples filtered by their *source*, *relation*, or *target*.\n \"\"\"\n triplematch = lambda t: (\n (source is None or source == t.source) and\n (relation is None or relation == t.relation) and\n (target is None or target == t.target)\n )\n return list(filter(triplematch, self._triples))\n"
] |
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
|
goodmami/penman
|
penman.py
|
PENMANCodec.handle_triple
|
python
|
def handle_triple(self, lhs, relation, rhs):
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
|
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L267-L304
|
[
"def _default_cast(x):\n if isinstance(x, basestring):\n if x.startswith('\"'):\n x = x # strip quotes?\n elif re.match(\n r'-?(0|[1-9]\\d*)(\\.\\d+[eE][-+]?|\\.|[eE][-+]?)\\d+$', x):\n x = float(x)\n elif re.match(r'-?\\d+$', x):\n x = int(x)\n return x\n",
"def is_relation_inverted(self, relation):\n \"\"\"\n Return True if *relation* is inverted.\n \"\"\"\n return relation and relation.endswith('-of')\n"
] |
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
|
goodmami/penman
|
penman.py
|
PENMANCodec.triples_to_graph
|
python
|
def triples_to_graph(self, triples, top=None):
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
|
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L306-L331
|
[
"def handle_triple(self, lhs, relation, rhs):\n \"\"\"\n Process triples before they are added to the graph.\n\n Note that *lhs* and *rhs* are as they originally appeared, and\n may be inverted. Inversions are detected by\n is_relation_inverted() and de-inverted by invert_relation().\n\n By default, this function:\n * removes initial colons on relations\n * de-inverts all inverted relations\n * sets empty relations to `None`\n * casts numeric string sources and targets to their numeric\n types (e.g. float, int)\n\n Args:\n lhs: the left hand side of an observed triple\n relation: the triple relation (possibly inverted)\n rhs: the right hand side of an observed triple\n Returns:\n The processed (source, relation, target) triple. By default,\n it is returned as a Triple object.\n \"\"\"\n relation = relation.replace(':', '', 1) # remove leading :\n\n if self.is_relation_inverted(relation): # deinvert\n source, target, inverted = rhs, lhs, True\n relation = self.invert_relation(relation)\n else:\n source, target, inverted = lhs, rhs, False\n\n source = _default_cast(source)\n target = _default_cast(target)\n\n if relation == '': # set empty relations to None\n relation = None\n\n return Triple(source, relation, target, inverted)\n"
] |
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _encode_penman(self, g, top=None):
"""
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
"""
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
|
goodmami/penman
|
penman.py
|
PENMANCodec._encode_penman
|
python
|
def _encode_penman(self, g, top=None):
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
|
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L436-L499
|
[
"def _layout(self, g, src, offset, seen):\n indent = self.indent\n if src not in g or len(g.get(src, [])) == 0 or src in seen:\n return src\n seen.add(src)\n branches = []\n outedges = self.relation_sort(g[src])\n head = '({}'.format(src)\n if indent is True:\n offset += len(head) + 1 # + 1 for space after src (added later)\n elif indent is not None and indent is not False:\n offset += indent\n for t in outedges:\n if t.relation == self.TYPE_REL:\n if t.target is not None:\n # node types always come first\n branches = ['/ {}'.format(t.target)] + branches\n else:\n if t.inverted:\n tgt = t.source\n rel = self.invert_relation(t.relation)\n else:\n tgt = t.target\n rel = t.relation or ''\n inner_offset = (len(rel) + 2) if indent is True else 0\n branch = self._layout(g, tgt, offset + inner_offset, seen)\n branches.append(':{} {}'.format(rel, branch))\n if branches:\n head += ' '\n delim = ' ' if (indent is None or indent is False) else '\\n'\n tail = (delim + (' ' * offset)).join(branches) + ')'\n return head + tail\n",
"def _update(t):\n src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])\n p[src].append(t)\n remaining.remove(t)\n if tgt in variables and t.relation != self.TYPE_REL:\n topolist.append(tgt)\n return tgt\n return None\n",
"def _explore_preferred(src):\n ts = store.get(src, ([], []))[0]\n for t in ts:\n if t in remaining:\n tgt = _update(t)\n if tgt is not None:\n _explore_preferred(tgt)\n ts[:] = [] # clear explored list\n"
] |
class PENMANCodec(object):
"""
A parameterized encoder/decoder for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
NODE_ENTER_RE = re.compile(r'\s*(\()\s*')
NODE_EXIT_RE = re.compile(r'\s*(\))\s*')
RELATION_RE = re.compile(r'(:[^\s(),]*)\s*')
INT_RE = re.compile(r'[+-]?\d+')
FLOAT_RE = re.compile(
r'[-+]?(((\d+\.\d*|\.\d+)([eE][-+]?\d+)?)|\d+[eE][-+]?\d+)'
)
ATOM_RE = re.compile(r'([^\s()\/,]+)')
STRING_RE = re.compile(r'("[^"\\]*(?:\\.[^"\\]*)*")')
VAR_RE = re.compile('({}|{})'.format(STRING_RE.pattern, ATOM_RE.pattern))
NODETYPE_RE = VAR_RE # default; allow strings, numbers, and symbols
COMMA_RE = re.compile(r'\s*,\s*')
SPACING_RE = re.compile(r'\s*')
def __init__(self, indent=True, relation_sort=original_order):
"""
Initialize a new codec.
Args:
indent: if True, adaptively indent; if False or None, don't
indent; if a non-negative integer, indent that many
spaces per nesting level
relation_sort: when encoding, sort the relations on each
node according to this function; by default, the
original order is maintained
"""
self.indent = indent
self.relation_sort = relation_sort
def decode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
"""
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
def iterdecode(self, s, triples=False):
"""
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
"""
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
def encode(self, g, top=None, triples=False):
"""
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
"""
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return relation and relation.endswith('-of')
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
return relation[:-3] or None
else:
return (relation or '') + '-of'
def handle_triple(self, lhs, relation, rhs):
"""
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
"""
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
def triples_to_graph(self, triples, top=None):
"""
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
"""
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
def _decode_triple_conjunction(self, s, pos=0):
top, nodes, edges = None, [], []
start = None
while True:
m = _regex(self.ATOM_RE, s, pos, "a relation/predicate")
if start is None:
start = m.start(1)
pos, rel = m.end(0), m.group(1)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
pos = m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
m = _regex(self.COMMA_RE, s, pos, '","')
pos = m.end(0)
if rel == self.TYPE_REL:
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, tgt = m.end(0), m.group(1)
if var == self.TOP_VAR and rel == self.TOP_REL:
top = tgt
elif rel == self.TYPE_REL:
nodes.append((var, rel, tgt))
else:
edges.append((var, rel, tgt))
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
if m.end(0) < len(s) and s[m.end(0)] == '^':
pos = m.end(0) + 1
else:
break
if top is None and nodes:
top = nodes[0][0]
return (start, pos), (top, nodes, edges)
def _decode_penman_node(self, s, pos=0):
nodes, edges = [], []
strlen = len(s)
m = _regex(self.NODE_ENTER_RE, s, pos, '"("')
start, pos = m.start(1), m.end(0)
m = _regex(self.VAR_RE, s, pos, "a variable (node identifier)")
pos, var = m.end(0), m.group(1).strip()
nodetype = None
while pos < strlen and s[pos] != ')':
# node type
if s[pos] == '/':
pos = self.SPACING_RE.match(s, pos=pos+1).end()
m = _regex(self.NODETYPE_RE, s, pos, 'a node type')
pos, nodetype = m.end(0), m.group(1)
# relation
elif s[pos] == ':':
m = _regex(self.RELATION_RE, s, pos, 'a relation')
pos, rel = m.end(0), m.group(1)
# node value
if s[pos] == '(':
span, data = self._decode_penman_node(s, pos=pos)
pos = span[1]
subtop, subnodes, subedges = data
nodes.extend(subnodes)
edges.append((var, rel, subtop))
edges.extend(subedges)
# string or other atom value
else:
if s[pos] == '"':
m = _regex(self.STRING_RE, s, pos, 'a quoted string')
pos, value = m.end(0), m.group(1)
else:
m = _regex(self.ATOM_RE, s, pos, 'a float/int/symbol')
pos, value = m.end(0), m.group(1)
edges.append((var, rel, value))
elif s[pos].isspace():
pos += 1
# error
else:
raise DecodeError('Expected ":" or "/"', string=s, pos=pos)
m = _regex(self.NODE_EXIT_RE, s, pos, '")"')
pos = m.end(1)
nodes = [(var, self.TYPE_REL, nodetype)] + nodes
return (start, pos), (var, nodes, edges)
def _layout(self, g, src, offset, seen):
indent = self.indent
if src not in g or len(g.get(src, [])) == 0 or src in seen:
return src
seen.add(src)
branches = []
outedges = self.relation_sort(g[src])
head = '({}'.format(src)
if indent is True:
offset += len(head) + 1 # + 1 for space after src (added later)
elif indent is not None and indent is not False:
offset += indent
for t in outedges:
if t.relation == self.TYPE_REL:
if t.target is not None:
# node types always come first
branches = ['/ {}'.format(t.target)] + branches
else:
if t.inverted:
tgt = t.source
rel = self.invert_relation(t.relation)
else:
tgt = t.target
rel = t.relation or ''
inner_offset = (len(rel) + 2) if indent is True else 0
branch = self._layout(g, tgt, offset + inner_offset, seen)
branches.append(':{} {}'.format(rel, branch))
if branches:
head += ' '
delim = ' ' if (indent is None or indent is False) else '\n'
tail = (delim + (' ' * offset)).join(branches) + ')'
return head + tail
def _encode_triple_conjunction(self, g, top=None):
if top is None:
top = g.top
if self.TOP_VAR is not None and top is not None:
top_triple = [(self.TOP_VAR, self.TOP_REL, top)]
else:
top_triple = []
return ' ^\n'.join(
map('{0[1]}({0[0]}, {0[2]})'.format, top_triple + g.triples())
)
|
goodmami/penman
|
penman.py
|
AMRCodec.is_relation_inverted
|
python
|
def is_relation_inverted(self, relation):
    """
    Return True if *relation* is inverted.
    """
    # Explicit deinversions (e.g. 'mod') always count as inverted;
    # otherwise an '-of' suffix marks an inversion unless the name is
    # itself one of the declared canonical inversions.
    if relation in self._deinversions:
        return True
    return relation.endswith('-of') and relation not in self._inversions
|
Return True if *relation* is inverted.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L571-L578
| null |
class AMRCodec(PENMANCodec):
"""
An AMR codec for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
# vars: [a-z]+\d* ; first relation must be node type
NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
NODETYPE_RE = PENMANCodec.ATOM_RE
VAR_RE = re.compile(r'([a-z]+\d*)')
# only non-anonymous relations
RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
_inversions = {
TYPE_REL: None, # don't allow inverted types
'domain': 'mod',
'consist-of': 'consist-of-of',
'prep-on-behalf-of': 'prep-on-behalf-of-of',
'prep-out-of': 'prep-out-of-of',
}
_deinversions = {
'mod': 'domain',
}
def invert_relation(self, relation):
"""
Invert or deinvert *relation*.
"""
if self.is_relation_inverted(relation):
rel = self._deinversions.get(relation, relation[:-3])
else:
rel = self._inversions.get(relation, relation + '-of')
if rel is None:
raise PenmanError(
'Cannot (de)invert {}; not allowed'.format(relation)
)
return rel
|
goodmami/penman
|
penman.py
|
AMRCodec.invert_relation
|
python
|
def invert_relation(self, relation):
    """
    Invert or deinvert *relation*.
    """
    # Pick the lookup table and suffix rule for the current orientation,
    # then consult the table first so special cases (e.g. 'mod'/'domain')
    # override the plain '-of' suffix convention.
    inverted = self.is_relation_inverted(relation)
    table = self._deinversions if inverted else self._inversions
    by_suffix = relation[:-3] if inverted else relation + '-of'
    rel = table.get(relation, by_suffix)
    if rel is None:
        # a None entry in the table marks relations that may not be flipped
        raise PenmanError(
            'Cannot (de)invert {}; not allowed'.format(relation)
        )
    return rel
|
Invert or deinvert *relation*.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L580-L592
|
[
"def is_relation_inverted(self, relation):\n \"\"\"\n Return True if *relation* is inverted.\n \"\"\"\n return (\n relation in self._deinversions or\n (relation.endswith('-of') and relation not in self._inversions)\n )\n"
] |
class AMRCodec(PENMANCodec):
"""
An AMR codec for graphs in PENMAN notation.
"""
TYPE_REL = 'instance'
TOP_VAR = None
TOP_REL = 'top'
# vars: [a-z]+\d* ; first relation must be node type
NODE_ENTER_RE = re.compile(r'\s*(\()\s*(?=[a-z]+\d*\s*\/)')
NODETYPE_RE = PENMANCodec.ATOM_RE
VAR_RE = re.compile(r'([a-z]+\d*)')
# only non-anonymous relations
RELATION_RE = re.compile(r'(:[^\s(),]+)\s*')
_inversions = {
TYPE_REL: None, # don't allow inverted types
'domain': 'mod',
'consist-of': 'consist-of-of',
'prep-on-behalf-of': 'prep-on-behalf-of-of',
'prep-out-of': 'prep-out-of-of',
}
_deinversions = {
'mod': 'domain',
}
def is_relation_inverted(self, relation):
"""
Return True if *relation* is inverted.
"""
return (
relation in self._deinversions or
(relation.endswith('-of') and relation not in self._inversions)
)
|
goodmami/penman
|
penman.py
|
Graph.triples
|
python
|
def triples(self, source=None, relation=None, target=None):
    """
    Return triples filtered by their *source*, *relation*, or *target*.

    A filter argument left as None matches every triple.
    """
    selected = []
    for t in self._triples:
        if source is not None and source != t.source:
            continue
        if relation is not None and relation != t.relation:
            continue
        if target is not None and target != t.target:
            continue
        selected.append(t)
    return selected
|
Return triples filtered by their *source*, *relation*, or *target*.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L680-L689
| null |
class Graph(object):
"""
A basic class for modeling a rooted, directed acyclic graph.
A Graph is defined by a list of triples, which can be divided into
two parts: a list of graph edges where both the source and target
are node identifiers, and a list of node attributes where only the
source is a node identifier and the target is a constant. These
lists can be obtained via the Graph.triples(), Graph.edges(), and
Graph.attributes() methods.
"""
def __init__(self, data=None, top=None):
"""
Create a Graph from an iterable of triples.
Args:
data: an iterable of triples (Triple objects or 3-tuples)
top: the node identifier of the top node; if unspecified,
the source of the first triple is used
Example:
>>> Graph([
... ('b', 'instance', 'bark'),
... ('d', 'instance', 'dog'),
... ('b', 'ARG1', 'd')
... ])
"""
self._triples = []
self._top = None
if data is None:
data = []
else:
data = list(data) # make list (e.g., if its a generator)
if data:
self._triples.extend(
Triple(*t, inverted=getattr(t, 'inverted', None))
for t in data
)
# implicit top: source of first triple
if top is None:
top = data[0][0]
self.top = top
def __repr__(self):
return '<{} object (top={}) at {}>'.format(
self.__class__.__name__,
self.top,
id(self)
)
def __str__(self):
return PENMANCodec().encode(self) # just use the default encoder
@property
def top(self):
"""
The top variable.
"""
return self._top
@top.setter
def top(self, top):
if top not in self.variables():
raise ValueError('top must be a valid node')
self._top = top # check if top is valid variable?
def variables(self):
"""
Return the list of variables (nonterminal node identifiers).
"""
return set(v for v, _, _ in self._triples)
def edges(self, source=None, relation=None, target=None):
"""
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
"""
edgematch = lambda e: (
(source is None or source == e.source) and
(relation is None or relation == e.relation) and
(target is None or target == e.target)
)
variables = self.variables()
edges = [t for t in self._triples if t.target in variables]
return list(filter(edgematch, edges))
def attributes(self, source=None, relation=None, target=None):
"""
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
"""
attrmatch = lambda a: (
(source is None or source == a.source) and
(relation is None or relation == a.relation) and
(target is None or target == a.target)
)
variables = self.variables()
attrs = [t for t in self.triples() if t.target not in variables]
return list(filter(attrmatch, attrs))
def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
|
goodmami/penman
|
penman.py
|
Graph.edges
|
python
|
def edges(self, source=None, relation=None, target=None):
    """
    Return edges filtered by their *source*, *relation*, or *target*.

    Edges don't include terminal triples (node types or attributes):
    only triples whose target is itself a variable are considered.
    """
    nonterminals = self.variables()
    matched = []
    for e in self._triples:
        if e.target not in nonterminals:
            continue  # terminal triple (type or attribute), not an edge
        if source is not None and source != e.source:
            continue
        if relation is not None and relation != e.relation:
            continue
        if target is not None and target != e.target:
            continue
        matched.append(e)
    return matched
|
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L691-L704
|
[
"def variables(self):\n \"\"\"\n Return the list of variables (nonterminal node identifiers).\n \"\"\"\n return set(v for v, _, _ in self._triples)\n"
] |
class Graph(object):
"""
A basic class for modeling a rooted, directed acyclic graph.
A Graph is defined by a list of triples, which can be divided into
two parts: a list of graph edges where both the source and target
are node identifiers, and a list of node attributes where only the
source is a node identifier and the target is a constant. These
lists can be obtained via the Graph.triples(), Graph.edges(), and
Graph.attributes() methods.
"""
def __init__(self, data=None, top=None):
"""
Create a Graph from an iterable of triples.
Args:
data: an iterable of triples (Triple objects or 3-tuples)
top: the node identifier of the top node; if unspecified,
the source of the first triple is used
Example:
>>> Graph([
... ('b', 'instance', 'bark'),
... ('d', 'instance', 'dog'),
... ('b', 'ARG1', 'd')
... ])
"""
self._triples = []
self._top = None
if data is None:
data = []
else:
data = list(data) # make list (e.g., if its a generator)
if data:
self._triples.extend(
Triple(*t, inverted=getattr(t, 'inverted', None))
for t in data
)
# implicit top: source of first triple
if top is None:
top = data[0][0]
self.top = top
def __repr__(self):
return '<{} object (top={}) at {}>'.format(
self.__class__.__name__,
self.top,
id(self)
)
def __str__(self):
return PENMANCodec().encode(self) # just use the default encoder
@property
def top(self):
"""
The top variable.
"""
return self._top
@top.setter
def top(self, top):
if top not in self.variables():
raise ValueError('top must be a valid node')
self._top = top # check if top is valid variable?
def variables(self):
"""
Return the list of variables (nonterminal node identifiers).
"""
return set(v for v, _, _ in self._triples)
def triples(self, source=None, relation=None, target=None):
"""
Return triples filtered by their *source*, *relation*, or *target*.
"""
triplematch = lambda t: (
(source is None or source == t.source) and
(relation is None or relation == t.relation) and
(target is None or target == t.target)
)
return list(filter(triplematch, self._triples))
def attributes(self, source=None, relation=None, target=None):
"""
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
"""
attrmatch = lambda a: (
(source is None or source == a.source) and
(relation is None or relation == a.relation) and
(target is None or target == a.target)
)
variables = self.variables()
attrs = [t for t in self.triples() if t.target not in variables]
return list(filter(attrmatch, attrs))
def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
|
goodmami/penman
|
penman.py
|
Graph.attributes
|
python
|
def attributes(self, source=None, relation=None, target=None):
    """
    Return attributes filtered by their *source*, *relation*, or *target*.

    Attributes don't include triples where the target is a nonterminal
    (i.e. another variable); those are edges.
    """
    nonterminals = self.variables()
    found = []
    for a in self.triples():
        if a.target in nonterminals:
            continue  # edge between variables, not an attribute
        if source is not None and source != a.source:
            continue
        if relation is not None and relation != a.relation:
            continue
        if target is not None and target != a.target:
            continue
        found.append(a)
    return found
|
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L706-L719
|
[
"def variables(self):\n \"\"\"\n Return the list of variables (nonterminal node identifiers).\n \"\"\"\n return set(v for v, _, _ in self._triples)\n",
"def triples(self, source=None, relation=None, target=None):\n \"\"\"\n Return triples filtered by their *source*, *relation*, or *target*.\n \"\"\"\n triplematch = lambda t: (\n (source is None or source == t.source) and\n (relation is None or relation == t.relation) and\n (target is None or target == t.target)\n )\n return list(filter(triplematch, self._triples))\n"
] |
class Graph(object):
"""
A basic class for modeling a rooted, directed acyclic graph.
A Graph is defined by a list of triples, which can be divided into
two parts: a list of graph edges where both the source and target
are node identifiers, and a list of node attributes where only the
source is a node identifier and the target is a constant. These
lists can be obtained via the Graph.triples(), Graph.edges(), and
Graph.attributes() methods.
"""
def __init__(self, data=None, top=None):
"""
Create a Graph from an iterable of triples.
Args:
data: an iterable of triples (Triple objects or 3-tuples)
top: the node identifier of the top node; if unspecified,
the source of the first triple is used
Example:
>>> Graph([
... ('b', 'instance', 'bark'),
... ('d', 'instance', 'dog'),
... ('b', 'ARG1', 'd')
... ])
"""
self._triples = []
self._top = None
if data is None:
data = []
else:
data = list(data) # make list (e.g., if its a generator)
if data:
self._triples.extend(
Triple(*t, inverted=getattr(t, 'inverted', None))
for t in data
)
# implicit top: source of first triple
if top is None:
top = data[0][0]
self.top = top
def __repr__(self):
return '<{} object (top={}) at {}>'.format(
self.__class__.__name__,
self.top,
id(self)
)
def __str__(self):
return PENMANCodec().encode(self) # just use the default encoder
@property
def top(self):
"""
The top variable.
"""
return self._top
@top.setter
def top(self, top):
if top not in self.variables():
raise ValueError('top must be a valid node')
self._top = top # check if top is valid variable?
def variables(self):
"""
Return the list of variables (nonterminal node identifiers).
"""
return set(v for v, _, _ in self._triples)
def triples(self, source=None, relation=None, target=None):
"""
Return triples filtered by their *source*, *relation*, or *target*.
"""
triplematch = lambda t: (
(source is None or source == t.source) and
(relation is None or relation == t.relation) and
(target is None or target == t.target)
)
return list(filter(triplematch, self._triples))
def edges(self, source=None, relation=None, target=None):
"""
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
"""
edgematch = lambda e: (
(source is None or source == e.source) and
(relation is None or relation == e.relation) and
(target is None or target == e.target)
)
variables = self.variables()
edges = [t for t in self._triples if t.target in variables]
return list(filter(edgematch, edges))
def reentrancies(self):
"""
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
"""
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
|
goodmami/penman
|
penman.py
|
Graph.reentrancies
|
python
|
def reentrancies(self):
    """
    Return a mapping of variables to their re-entrancy count.

    A re-entrancy is when more than one edge selects a node as its
    target. These graphs are rooted, so the top node always has an
    implicit entrancy. Only nodes with re-entrancies are reported, and
    the count is only for the entrant edges beyond the first.
    """
    counts = {}
    counts[self.top] = 1  # the top node is always implicitly entered once
    for edge in self.edges():
        counts[edge.target] = counts.get(edge.target, 0) + 1
    # report only nodes entered more than once, minus the free first entry
    return {var: n - 1 for var, n in counts.items() if n >= 2}
|
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
|
train
|
https://github.com/goodmami/penman/blob/a2563ca16063a7330e2028eb489a99cc8e425c41/penman.py#L721-L737
|
[
"def edges(self, source=None, relation=None, target=None):\n \"\"\"\n Return edges filtered by their *source*, *relation*, or *target*.\n\n Edges don't include terminal triples (node types or attributes).\n \"\"\"\n edgematch = lambda e: (\n (source is None or source == e.source) and\n (relation is None or relation == e.relation) and\n (target is None or target == e.target)\n )\n variables = self.variables()\n edges = [t for t in self._triples if t.target in variables]\n return list(filter(edgematch, edges))\n"
] |
class Graph(object):
"""
A basic class for modeling a rooted, directed acyclic graph.
A Graph is defined by a list of triples, which can be divided into
two parts: a list of graph edges where both the source and target
are node identifiers, and a list of node attributes where only the
source is a node identifier and the target is a constant. These
lists can be obtained via the Graph.triples(), Graph.edges(), and
Graph.attributes() methods.
"""
def __init__(self, data=None, top=None):
"""
Create a Graph from an iterable of triples.
Args:
data: an iterable of triples (Triple objects or 3-tuples)
top: the node identifier of the top node; if unspecified,
the source of the first triple is used
Example:
>>> Graph([
... ('b', 'instance', 'bark'),
... ('d', 'instance', 'dog'),
... ('b', 'ARG1', 'd')
... ])
"""
self._triples = []
self._top = None
if data is None:
data = []
else:
data = list(data) # make list (e.g., if its a generator)
if data:
self._triples.extend(
Triple(*t, inverted=getattr(t, 'inverted', None))
for t in data
)
# implicit top: source of first triple
if top is None:
top = data[0][0]
self.top = top
def __repr__(self):
return '<{} object (top={}) at {}>'.format(
self.__class__.__name__,
self.top,
id(self)
)
def __str__(self):
return PENMANCodec().encode(self) # just use the default encoder
@property
def top(self):
"""
The top variable.
"""
return self._top
@top.setter
def top(self, top):
if top not in self.variables():
raise ValueError('top must be a valid node')
self._top = top # check if top is valid variable?
def variables(self):
"""
Return the list of variables (nonterminal node identifiers).
"""
return set(v for v, _, _ in self._triples)
def triples(self, source=None, relation=None, target=None):
"""
Return triples filtered by their *source*, *relation*, or *target*.
"""
triplematch = lambda t: (
(source is None or source == t.source) and
(relation is None or relation == t.relation) and
(target is None or target == t.target)
)
return list(filter(triplematch, self._triples))
def edges(self, source=None, relation=None, target=None):
"""
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
"""
edgematch = lambda e: (
(source is None or source == e.source) and
(relation is None or relation == e.relation) and
(target is None or target == e.target)
)
variables = self.variables()
edges = [t for t in self._triples if t.target in variables]
return list(filter(edgematch, edges))
def attributes(self, source=None, relation=None, target=None):
"""
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
"""
attrmatch = lambda a: (
(source is None or source == a.source) and
(relation is None or relation == a.relation) and
(target is None or target == a.target)
)
variables = self.variables()
attrs = [t for t in self.triples() if t.target not in variables]
return list(filter(attrmatch, attrs))
|
maxcountryman/atomos
|
atomos/util.py
|
synchronized
|
python
|
def synchronized(fn):
    '''
    A decorator that serializes calls to the wrapped function.

    A single lock is created per decorated function; it is acquired
    before each call and always released afterwards (the ``with``
    statement releases it even if the call raises).

    :param fn: The function to wrap.
    '''
    guard = threading.Lock()

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        with guard:
            return fn(*args, **kwargs)

    return wrapper
|
A decorator which acquires a lock before attempting to execute its wrapped
function. Releases the lock in a finally clause.
:param fn: The function to wrap.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/util.py#L21-L38
| null |
# -*- coding: utf-8 -*-
'''
atomos.util
Utility functions.
'''
from __future__ import absolute_import
import functools
import threading
from multiprocessing import Value, Lock
def repr(module, instance, value):
repr_fmt = '<{m}.{cls}({val}) object at {addr}>'
return repr_fmt.format(m=module,
cls=instance.__class__.__name__,
val=value,
addr=hex(id(instance)))
class ReadersWriterLock(object):
    '''
    A readers-writer lock.

    Provides exclusive locking on write while allowing for concurrent access
    on read. Useful for when a data structure cannot be updated atomically and
    therefore reads during a write could yield incorrect representations.

    To use, construct a new `ReadersWriterLock` instance. Use the shared
    property (i.e. the shared lock object) when the shared semantic is desired
    and the exclusive property (i.e. the exclusive lock object) when the
    exclusive semantic is desired.

    Lock objects are wrappers around `threading.Lock`. As a result, the normal
    usage patterns are valid. For example, a shared lock can be acquired like
    this::

        >>> lock = ReadersWriterLock()
        >>> lock.shared.acquire()

    An exclusive lock can be acquired in a similar fashion, using the
    `exclusive` attribute instead. Both locks are also provisioned as context
    managers. Note that a difference in API here is that no blocking parameter
    should be provided.

    Readers-writer locks are meant to allow for a specific situation where a
    critical section should be visible to multiple readers so long as there is
    no writer. The latter case is simply an exclusive lock. However this does
    not allow for concurrent readers.

    To facilitate multiple readers, two "locks" are provided: a shared and
    exclusive lock. While the exclusive lock is not held, the shared lock may
    be acquired as many times as desired. However once the exclusive lock is
    obtained, attempts to acquire the read lock will block until the exclusive
    lock is released.

    Note that obtaining the write lock implies that there are no readers and in
    fact an attempt to acquire it will block until all the readers have
    released the lock.
    '''
    def __init__(self):
        # _reader_lock serializes access to _reader_count; _writer_lock is
        # held while any reader is active (taken by the first reader in,
        # released by the last reader out) or while a writer holds
        # `exclusive`.
        self._reader_lock = threading.Lock()
        self._writer_lock = threading.Lock()
        self._reader_count = 0  # number of currently-active readers

        class SharedLock(object):
            # NOTE: in these inner classes `inner` is the lock-object
            # instance itself, while `self` (closed over from __init__) is
            # the enclosing ReadersWriterLock.
            def acquire(inner):
                '''
                Acquires the shared lock, prevents acquisition of the exclusive
                lock.
                '''
                self._reader_lock.acquire()
                # The first reader takes the writer lock so that writers
                # block until every reader has released.
                if self._reader_count == 0:
                    self._writer_lock.acquire()
                try:
                    self._reader_count += 1
                finally:
                    self._reader_lock.release()

            def release(inner):
                '''
                Releases the shared lock, allows acquisition of the exclusive
                lock.
                '''
                self._reader_lock.acquire()
                try:
                    self._reader_count -= 1
                finally:
                    # The last reader out releases the writer lock, letting
                    # a pending writer proceed.
                    if self._reader_count == 0:
                        self._writer_lock.release()
                    self._reader_lock.release()

            def __enter__(inner):
                inner.acquire()
                return inner

            def __exit__(inner, exc_value, exc_type, tb):
                inner.release()

        self.shared = SharedLock()

        class ExclusiveLock(object):
            def acquire(inner):
                '''
                Acquires the exclusive lock, prevents acquisition of the shared
                lock.
                '''
                self._writer_lock.acquire()

            def release(inner):
                '''
                Releases the exclusive lock, allows acquisition of the shared
                lock.
                '''
                self._writer_lock.release()

            def __enter__(inner):
                inner.acquire()
                return inner

            def __exit__(inner, exc_value, exc_type, tb):
                inner.release()

        self.exclusive = ExclusiveLock()
class ReadersWriterLockMultiprocessing(object):
    '''
    A readers-writer lock for multiprocessing.

    Works like ReadersWriterLock but uses multiprocessing Lock and Value so
    state can be shared across processes.
    '''
    def __init__(self):
        # _reader_lock serializes access to the shared reader counter;
        # _writer_lock is held while any reader is active or while a writer
        # holds `exclusive`.
        self._reader_lock = Lock()
        self._writer_lock = Lock()
        # 'i' = C signed int in shared memory; the counter is only ever
        # mutated while _reader_lock is held (Value's own internal lock is
        # not relied upon here).
        self._reader_count = Value('i')

        class SharedLock(object):
            # NOTE: `inner` is the lock-object instance itself; `self`
            # (closed over from __init__) is the enclosing
            # ReadersWriterLockMultiprocessing.
            def acquire(inner):
                '''
                Acquires the shared lock, prevents acquisition of the exclusive
                lock.
                '''
                self._reader_lock.acquire()
                # The first reader takes the writer lock so that writers
                # block until every reader has released.
                if self._reader_count.value == 0:
                    self._writer_lock.acquire()
                try:
                    self._reader_count.value += 1
                finally:
                    self._reader_lock.release()

            def release(inner):
                '''
                Releases the shared lock, allows acquisition of the exclusive
                lock.
                '''
                self._reader_lock.acquire()
                try:
                    self._reader_count.value -= 1
                finally:
                    # The last reader out releases the writer lock, letting
                    # a pending writer proceed.
                    if self._reader_count.value == 0:
                        self._writer_lock.release()
                    self._reader_lock.release()

            def __enter__(inner):
                inner.acquire()
                return inner

            def __exit__(inner, exc_value, exc_type, tb):
                inner.release()

        self.shared = SharedLock()

        class ExclusiveLock(object):
            def acquire(inner):
                '''
                Acquires the exclusive lock, prevents acquisition of the shared
                lock.
                '''
                self._writer_lock.acquire()

            def release(inner):
                '''
                Releases the exclusive lock, allows acquisition of the shared
                lock.
                '''
                self._writer_lock.release()

            def __enter__(inner):
                inner.acquire()
                return inner

            def __exit__(inner, exc_value, exc_type, tb):
                inner.release()

        self.exclusive = ExclusiveLock()
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicReference.set
|
python
|
def set(self, value):
'''
Atomically sets the value to `value`.
:param value: The value to set.
'''
with self._lock.exclusive:
self._value = value
return value
|
Atomically sets the value to `value`.
:param value: The value to set.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L38-L46
| null |
class AtomicReference(object):
'''
A reference to an object which allows atomic manipulation semantics.
AtomicReferences are particularlly useful when an object cannot otherwise
be manipulated atomically.
'''
def __init__(self, value=None):
self._value = value
self._lock = util.ReadersWriterLock()
def __repr__(self):
return util.repr(__name__, self, self._value)
def get(self):
'''
Returns the value.
'''
with self._lock.shared:
return self._value
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._lock.exclusive:
oldval = self._value
self._value = value
return oldval
def compare_and_set(self, expect, update):
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
'''
with self._lock.exclusive:
if self._value == expect:
self._value = update
return True
return False
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicReference.get_and_set
|
python
|
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._lock.exclusive:
oldval = self._value
self._value = value
return oldval
|
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L48-L57
| null |
class AtomicReference(object):
'''
A reference to an object which allows atomic manipulation semantics.
AtomicReferences are particularlly useful when an object cannot otherwise
be manipulated atomically.
'''
def __init__(self, value=None):
self._value = value
self._lock = util.ReadersWriterLock()
def __repr__(self):
return util.repr(__name__, self, self._value)
def get(self):
'''
Returns the value.
'''
with self._lock.shared:
return self._value
def set(self, value):
'''
Atomically sets the value to `value`.
:param value: The value to set.
'''
with self._lock.exclusive:
self._value = value
return value
def compare_and_set(self, expect, update):
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
'''
with self._lock.exclusive:
if self._value == expect:
self._value = update
return True
return False
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicReference.compare_and_set
|
python
|
def compare_and_set(self, expect, update):
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
'''
with self._lock.exclusive:
if self._value == expect:
self._value = update
return True
return False
|
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L59-L73
| null |
class AtomicReference(object):
'''
A reference to an object which allows atomic manipulation semantics.
AtomicReferences are particularlly useful when an object cannot otherwise
be manipulated atomically.
'''
def __init__(self, value=None):
self._value = value
self._lock = util.ReadersWriterLock()
def __repr__(self):
return util.repr(__name__, self, self._value)
def get(self):
'''
Returns the value.
'''
with self._lock.shared:
return self._value
def set(self, value):
'''
Atomically sets the value to `value`.
:param value: The value to set.
'''
with self._lock.exclusive:
self._value = value
return value
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._lock.exclusive:
oldval = self._value
self._value = value
return oldval
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicNumber.add_and_get
|
python
|
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
self._value += delta
return self._value
|
Atomically adds `delta` to the current value.
:param delta: The delta to add.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L111-L119
| null |
class AtomicNumber(AtomicReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._value
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
oldval = self._value
self._value += delta
return oldval
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
self._value -= delta
return self._value
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
oldval = self._value
self._value -= delta
return oldval
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicNumber.get_and_add
|
python
|
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
oldval = self._value
self._value += delta
return oldval
|
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L121-L130
| null |
class AtomicNumber(AtomicReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._value
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
self._value += delta
return self._value
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
self._value -= delta
return self._value
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
oldval = self._value
self._value -= delta
return oldval
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicNumber.subtract_and_get
|
python
|
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
self._value -= delta
return self._value
|
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L132-L140
| null |
class AtomicNumber(AtomicReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._value
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
self._value += delta
return self._value
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
oldval = self._value
self._value += delta
return oldval
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
oldval = self._value
self._value -= delta
return oldval
|
maxcountryman/atomos
|
atomos/atomic.py
|
AtomicNumber.get_and_subtract
|
python
|
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
oldval = self._value
self._value -= delta
return oldval
|
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atomic.py#L142-L152
| null |
class AtomicNumber(AtomicReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._value
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
self._value += delta
return self._value
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._lock.exclusive:
oldval = self._value
self._value += delta
return oldval
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._lock.exclusive:
self._value -= delta
return self._value
|
maxcountryman/atomos
|
atomos/atom.py
|
ARef.notify_watches
|
python
|
def notify_watches(self, oldval, newval):
'''
Passes `oldval` and `newval` to each `fn` in the watches dictionary,
passing along its respective key and the reference to this object.
:param oldval: The old value which will be passed to the watch.
:param newval: The new value which will be passed to the watch.
'''
watches = self._watches.copy()
for k in watches:
fn = watches[k]
if isinstance(fn, collections.Callable):
fn(k, self, oldval, newval)
|
Passes `oldval` and `newval` to each `fn` in the watches dictionary,
passing along its respective key and the reference to this object.
:param oldval: The old value which will be passed to the watch.
:param newval: The new value which will be passed to the watch.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atom.py#L64-L76
| null |
class ARef(object):
'''
Ref object super type.
Refs may hold watches which can be notified when a value a ref holds
changes. In effect, a watch is a callback which receives the key,
object reference, oldval, and newval.
For example, a watch function could be constructed like this::
>>> def watch(k, ref, old, new):
... print k, ref, old, new
>>> aref = ARef()
>>> aref.add_watch(watch)
However note that `ARef` should generally be subclassed, a la `Atom`, as it
does not independently hold any value and functions merely as a container
for the watch semantics.
'''
def __init__(self):
self._watches = {}
def get_watches(self):
'''
Returns the watches dictionary.
'''
return self._watches
@util.synchronized
def add_watch(self, key, fn):
'''
Adds `key` to the watches dictionary with the value `fn`.
:param key: The key for this watch.
:param fn: The value for this watch, should be a function. Note that
this function will be passed values which should not be mutated
wihtout copying as other watches may in turn be passed the same
eference!
'''
self._watches[key] = fn
@util.synchronized
def remove_watch(self, key):
'''
Removes `key` from the watches dictionary.
:param key: The key of the watch to remove.
'''
self._watches.pop(key, None)
|
maxcountryman/atomos
|
atomos/atom.py
|
Atom.swap
|
python
|
def swap(self, fn, *args, **kwargs):
'''
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.
:param fn: A function which will be passed the current state. Should
return a new state. This absolutely *MUST NOT* mutate the
reference to the current state! If it does, this function may loop
indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
'''
while True:
oldval = self.deref()
newval = fn(oldval, *args, **kwargs)
if self._state.compare_and_set(oldval, newval):
self.notify_watches(oldval, newval)
return newval
|
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.
:param fn: A function which will be passed the current state. Should
return a new state. This absolutely *MUST NOT* mutate the
reference to the current state! If it does, this function may loop
indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atom.py#L154-L172
|
[
"def deref(self):\n '''\n Returns the value held.\n '''\n return self._state.get()\n"
] |
class Atom(ARef):
'''
Atom object type.
Atoms store mutable state and provide thread-safe methods for retrieving
and altering it. This is useful in multi-threaded contexts or any time an
application makes use of shared mutable state. By using an atom, it is
possible to ensure that read values are always consistent and that write
values do not yield unexpected state (e.g. data loss).
For example, if an application uses a dictionary to store state, using an
atom will guarantee that the dictionary is never in an inconsistent state
as it is being updated::
>>> state = Atom({'active_conns': 0, 'clients': set([])})
>>> def new_client(cur_state, client):
... new_state = cur_state.copy()
... new_state['clients'].add(client)
... new_state['active_conns'] += 1
... return new_state
>>> state.swap(new_client, 'foo')
In the above example we use an atom to store state about connections. Our
mutation function, `new_client` is a function which takes the existing
state contained by the atom and a new client. Any part of our program which
reads the atom's state by using `deref` will always see a consistent view
of its value.
This is particularly useful when altering shared mutable state which cannot
be changed atomically. Atoms enable atomic semantics for such objects.
Because atoms are themselves refs and inherit from `ARef`, it is also
possible to add watches to them. Watches can be thought of as callbacks
which are invoked when the atom's state changes.
For example, if we would like to log each time a client connects, we can
write a watch that will be responsible for this and then add it to the
state atom::
>>> state = Atom({'active_conns': 0, 'clients': set([])})
>>> def log_new_clients(k, ref, old, new):
... if not new['active_conns'] > old['active_conns']:
... return
... old_clients = old['clients']
... new_clients = new['clients']
... print 'new client', new_clients.difference(old_clients)
>>> state.add_watch('log_new_clients', log_new_clients)
We have added a watch which will print out a message when the client count
has increased, i.e. a client has been added. Note that for a real world
application, a proper logging facility should be preferred over print.
Watches are keyed by the first value passed to `add_watch` and are invoked
whenever the atom changes with the key, reference, old state, and new state
as parameters.
Note that watch functions may be called from multiple threads at once and
therefore their ordering is not guaranteed. For instance, an atom's state
may change, and before the watches can be notified another thread may alter
the atom and trigger notifications. It is possible for the second thread's
notifications to arrive before the first's.
'''
def __init__(self, state):
super(Atom, self).__init__()
self._state = atomic.AtomicReference(state)
def __repr__(self):
return util.repr(__name__, self, self._state._value)
def deref(self):
'''
Returns the value held.
'''
return self._state.get()
def reset(self, newval):
'''
Resets the atom's value to `newval`, returning `newval`.
:param newval: The new value to set.
'''
oldval = self._state.get()
self._state.set(newval)
self.notify_watches(oldval, newval)
return newval
def compare_and_set(self, oldval, newval):
'''
Given `oldval` and `newval`, sets the atom's value to `newval` if and
only if `oldval` is the atom's current value. Returns `True` upon
success, otherwise `False`.
:param oldval: The old expected value.
:param newval: The new value which will be set if and only if `oldval`
equals the current value.
'''
ret = self._state.compare_and_set(oldval, newval)
if ret:
self.notify_watches(oldval, newval)
return ret
|
maxcountryman/atomos
|
atomos/atom.py
|
Atom.reset
|
python
|
def reset(self, newval):
'''
Resets the atom's value to `newval`, returning `newval`.
:param newval: The new value to set.
'''
oldval = self._state.get()
self._state.set(newval)
self.notify_watches(oldval, newval)
return newval
|
Resets the atom's value to `newval`, returning `newval`.
:param newval: The new value to set.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atom.py#L174-L183
|
[
"def notify_watches(self, oldval, newval):\n '''\n Passes `oldval` and `newval` to each `fn` in the watches dictionary,\n passing along its respective key and the reference to this object.\n\n :param oldval: The old value which will be passed to the watch.\n :param newval: The new value which will be passed to the watch.\n '''\n watches = self._watches.copy()\n for k in watches:\n fn = watches[k]\n if isinstance(fn, collections.Callable):\n fn(k, self, oldval, newval)\n"
] |
class Atom(ARef):
'''
Atom object type.
Atoms store mutable state and provide thread-safe methods for retrieving
and altering it. This is useful in multi-threaded contexts or any time an
application makes use of shared mutable state. By using an atom, it is
possible to ensure that read values are always consistent and that write
values do not yield unexpected state (e.g. data loss).
For example, if an application uses a dictionary to store state, using an
atom will guarantee that the dictionary is never in an inconsistent state
as it is being updated::
>>> state = Atom({'active_conns': 0, 'clients': set([])})
>>> def new_client(cur_state, client):
... new_state = cur_state.copy()
... new_state['clients'].add(client)
... new_state['active_conns'] += 1
... return new_state
>>> state.swap(new_client, 'foo')
In the above example we use an atom to store state about connections. Our
mutation function, `new_client` is a function which takes the existing
state contained by the atom and a new client. Any part of our program which
reads the atom's state by using `deref` will always see a consistent view
of its value.
This is particularly useful when altering shared mutable state which cannot
be changed atomically. Atoms enable atomic semantics for such objects.
Because atoms are themselves refs and inherit from `ARef`, it is also
possible to add watches to them. Watches can be thought of as callbacks
which are invoked when the atom's state changes.
For example, if we would like to log each time a client connects, we can
write a watch that will be responsible for this and then add it to the
state atom::
>>> state = Atom({'active_conns': 0, 'clients': set([])})
>>> def log_new_clients(k, ref, old, new):
... if not new['active_conns'] > old['active_conns']:
... return
... old_clients = old['clients']
... new_clients = new['clients']
... print 'new client', new_clients.difference(old_clients)
>>> state.add_watch('log_new_clients', log_new_clients)
We have added a watch which will print out a message when the client count
has increased, i.e. a client has been added. Note that for a real world
application, a proper logging facility should be preferred over print.
Watches are keyed by the first value passed to `add_watch` and are invoked
whenever the atom changes with the key, reference, old state, and new state
as parameters.
Note that watch functions may be called from multiple threads at once and
therefore their ordering is not guaranteed. For instance, an atom's state
may change, and before the watches can be notified another thread may alter
the atom and trigger notifications. It is possible for the second thread's
notifications to arrive before the first's.
'''
def __init__(self, state):
super(Atom, self).__init__()
self._state = atomic.AtomicReference(state)
def __repr__(self):
return util.repr(__name__, self, self._state._value)
def deref(self):
'''
Returns the value held.
'''
return self._state.get()
def swap(self, fn, *args, **kwargs):
'''
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.
:param fn: A function which will be passed the current state. Should
return a new state. This absolutely *MUST NOT* mutate the
reference to the current state! If it does, this function may loop
indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
'''
while True:
oldval = self.deref()
newval = fn(oldval, *args, **kwargs)
if self._state.compare_and_set(oldval, newval):
self.notify_watches(oldval, newval)
return newval
def compare_and_set(self, oldval, newval):
'''
Given `oldval` and `newval`, sets the atom's value to `newval` if and
only if `oldval` is the atom's current value. Returns `True` upon
success, otherwise `False`.
:param oldval: The old expected value.
:param newval: The new value which will be set if and only if `oldval`
equals the current value.
'''
ret = self._state.compare_and_set(oldval, newval)
if ret:
self.notify_watches(oldval, newval)
return ret
|
maxcountryman/atomos
|
atomos/atom.py
|
Atom.compare_and_set
|
python
|
def compare_and_set(self, oldval, newval):
'''
Given `oldval` and `newval`, sets the atom's value to `newval` if and
only if `oldval` is the atom's current value. Returns `True` upon
success, otherwise `False`.
:param oldval: The old expected value.
:param newval: The new value which will be set if and only if `oldval`
equals the current value.
'''
ret = self._state.compare_and_set(oldval, newval)
if ret:
self.notify_watches(oldval, newval)
return ret
|
Given `oldval` and `newval`, sets the atom's value to `newval` if and
only if `oldval` is the atom's current value. Returns `True` upon
success, otherwise `False`.
:param oldval: The old expected value.
:param newval: The new value which will be set if and only if `oldval`
equals the current value.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atom.py#L185-L199
|
[
"def notify_watches(self, oldval, newval):\n '''\n Passes `oldval` and `newval` to each `fn` in the watches dictionary,\n passing along its respective key and the reference to this object.\n\n :param oldval: The old value which will be passed to the watch.\n :param newval: The new value which will be passed to the watch.\n '''\n watches = self._watches.copy()\n for k in watches:\n fn = watches[k]\n if isinstance(fn, collections.Callable):\n fn(k, self, oldval, newval)\n"
] |
class Atom(ARef):
'''
Atom object type.
Atoms store mutable state and provide thread-safe methods for retrieving
and altering it. This is useful in multi-threaded contexts or any time an
application makes use of shared mutable state. By using an atom, it is
possible to ensure that read values are always consistent and that write
values do not yield unexpected state (e.g. data loss).
For example, if an application uses a dictionary to store state, using an
atom will guarantee that the dictionary is never in an inconsistent state
as it is being updated::
>>> state = Atom({'active_conns': 0, 'clients': set([])})
>>> def new_client(cur_state, client):
... new_state = cur_state.copy()
... new_state['clients'].add(client)
... new_state['active_conns'] += 1
... return new_state
>>> state.swap(new_client, 'foo')
In the above example we use an atom to store state about connections. Our
mutation function, `new_client` is a function which takes the existing
state contained by the atom and a new client. Any part of our program which
reads the atom's state by using `deref` will always see a consistent view
of its value.
This is particularly useful when altering shared mutable state which cannot
be changed atomically. Atoms enable atomic semantics for such objects.
Because atoms are themselves refs and inherit from `ARef`, it is also
possible to add watches to them. Watches can be thought of as callbacks
which are invoked when the atom's state changes.
For example, if we would like to log each time a client connects, we can
write a watch that will be responsible for this and then add it to the
state atom::
>>> state = Atom({'active_conns': 0, 'clients': set([])})
>>> def log_new_clients(k, ref, old, new):
... if not new['active_conns'] > old['active_conns']:
... return
... old_clients = old['clients']
... new_clients = new['clients']
... print 'new client', new_clients.difference(old_clients)
>>> state.add_watch('log_new_clients', log_new_clients)
We have added a watch which will print out a message when the client count
has increased, i.e. a client has been added. Note that for a real world
application, a proper logging facility should be preferred over print.
Watches are keyed by the first value passed to `add_watch` and are invoked
whenever the atom changes with the key, reference, old state, and new state
as parameters.
Note that watch functions may be called from multiple threads at once and
therefore their ordering is not guaranteed. For instance, an atom's state
may change, and before the watches can be notified another thread may alter
the atom and trigger notifications. It is possible for the second thread's
notifications to arrive before the first's.
'''
def __init__(self, state):
super(Atom, self).__init__()
self._state = atomic.AtomicReference(state)
def __repr__(self):
return util.repr(__name__, self, self._state._value)
def deref(self):
'''
Returns the value held.
'''
return self._state.get()
def swap(self, fn, *args, **kwargs):
'''
Given a mutator `fn`, calls `fn` with the atom's current state, `args`,
and `kwargs`. The return value of this invocation becomes the new value
of the atom. Returns the new value.
:param fn: A function which will be passed the current state. Should
return a new state. This absolutely *MUST NOT* mutate the
reference to the current state! If it does, this function may loop
indefinitely.
:param \*args: Arguments to be passed to `fn`.
:param \*\*kwargs: Keyword arguments to be passed to `fn`.
'''
while True:
oldval = self.deref()
newval = fn(oldval, *args, **kwargs)
if self._state.compare_and_set(oldval, newval):
self.notify_watches(oldval, newval)
return newval
def reset(self, newval):
'''
Resets the atom's value to `newval`, returning `newval`.
:param newval: The new value to set.
'''
oldval = self._state.get()
self._state.set(newval)
self.notify_watches(oldval, newval)
return newval
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicCtypesReference.set
|
python
|
def set(self, value):
'''
Atomically sets the value to `value`.
:param value: The value to set.
'''
with self._reference.get_lock():
self._reference.value = value
return value
|
Atomically sets the value to `value`.
:param value: The value to set.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L95-L103
| null |
class AtomicCtypesReference(object):
'''
A reference to an object which allows atomic manipulation semantics.
AtomicCtypesReferences are particularlly useful when an object cannot
otherwise be manipulated atomically.
This only support ctypes data types.
https://docs.python.org/3.4/library/ctypes.html#fundamental-data-types
'''
def __init__(self, typecode_or_type=None, value=None):
'''
Atomic reference
:param typecode_or_type: The type of object allocated from shared
memory.
:param value: The default value.
'''
self._typecode_or_type = typecode_or_type
self._reference = multiprocessing.Value(self._typecode_or_type, value)
def __repr__(self):
return util.repr(__name__, self, self._reference)
def get(self):
'''
Returns the value.
'''
with self._reference.get_lock():
return self._reference.value
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value = value
return oldval
def compare_and_set(self, expect, update):
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
'''
with self._reference.get_lock():
if self._reference.value == expect:
self._reference.value = update
return True
return False
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicCtypesReference.get_and_set
|
python
|
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value = value
return oldval
|
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L105-L114
| null |
class AtomicCtypesReference(object):
'''
A reference to an object which allows atomic manipulation semantics.
AtomicCtypesReferences are particularlly useful when an object cannot
otherwise be manipulated atomically.
This only support ctypes data types.
https://docs.python.org/3.4/library/ctypes.html#fundamental-data-types
'''
def __init__(self, typecode_or_type=None, value=None):
'''
Atomic reference
:param typecode_or_type: The type of object allocated from shared
memory.
:param value: The default value.
'''
self._typecode_or_type = typecode_or_type
self._reference = multiprocessing.Value(self._typecode_or_type, value)
def __repr__(self):
return util.repr(__name__, self, self._reference)
def get(self):
'''
Returns the value.
'''
with self._reference.get_lock():
return self._reference.value
def set(self, value):
'''
Atomically sets the value to `value`.
:param value: The value to set.
'''
with self._reference.get_lock():
self._reference.value = value
return value
def compare_and_set(self, expect, update):
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
'''
with self._reference.get_lock():
if self._reference.value == expect:
self._reference.value = update
return True
return False
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicCtypesReference.compare_and_set
|
python
|
def compare_and_set(self, expect, update):
'''
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
'''
with self._reference.get_lock():
if self._reference.value == expect:
self._reference.value = update
return True
return False
|
Atomically sets the value to `update` if the current value is equal to
`expect`.
:param expect: The expected current value.
:param update: The value to set if and only if `expect` equals the
current value.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L116-L130
| null |
class AtomicCtypesReference(object):
'''
A reference to an object which allows atomic manipulation semantics.
AtomicCtypesReferences are particularlly useful when an object cannot
otherwise be manipulated atomically.
This only support ctypes data types.
https://docs.python.org/3.4/library/ctypes.html#fundamental-data-types
'''
def __init__(self, typecode_or_type=None, value=None):
'''
Atomic reference
:param typecode_or_type: The type of object allocated from shared
memory.
:param value: The default value.
'''
self._typecode_or_type = typecode_or_type
self._reference = multiprocessing.Value(self._typecode_or_type, value)
def __repr__(self):
return util.repr(__name__, self, self._reference)
def get(self):
'''
Returns the value.
'''
with self._reference.get_lock():
return self._reference.value
def set(self, value):
'''
Atomically sets the value to `value`.
:param value: The value to set.
'''
with self._reference.get_lock():
self._reference.value = value
return value
def get_and_set(self, value):
'''
Atomically sets the value to `value` and returns the old value.
:param value: The value to set.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value = value
return oldval
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicNumber.add_and_get
|
python
|
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
self._reference.value += delta
return self._reference.value
|
Atomically adds `delta` to the current value.
:param delta: The delta to add.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L169-L177
| null |
class AtomicNumber(AtomicCtypesReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._reference.value
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value += delta
return oldval
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
self._reference.value -= delta
return self._reference.value
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value -= delta
return oldval
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicNumber.get_and_add
|
python
|
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value += delta
return oldval
|
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L179-L188
| null |
class AtomicNumber(AtomicCtypesReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._reference.value
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
self._reference.value += delta
return self._reference.value
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
self._reference.value -= delta
return self._reference.value
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value -= delta
return oldval
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicNumber.subtract_and_get
|
python
|
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
self._reference.value -= delta
return self._reference.value
|
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L190-L198
| null |
class AtomicNumber(AtomicCtypesReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._reference.value
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
self._reference.value += delta
return self._reference.value
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value += delta
return oldval
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value -= delta
return oldval
|
maxcountryman/atomos
|
atomos/multiprocessing/atomic.py
|
AtomicNumber.get_and_subtract
|
python
|
def get_and_subtract(self, delta):
'''
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value -= delta
return oldval
|
Atomically subtracts `delta` from the current value and returns the
old value.
:param delta: The delta to subtract.
|
train
|
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L200-L210
| null |
class AtomicNumber(AtomicCtypesReference):
'''
AtomicNumber object super type.
Contains common methods for AtomicInteger, AtomicLong, and AtomicFloat.
'''
# We do not need a locked get since numbers are not complex data types.
def get(self):
'''
Returns the value.
'''
return self._reference.value
def add_and_get(self, delta):
'''
Atomically adds `delta` to the current value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
self._reference.value += delta
return self._reference.value
def get_and_add(self, delta):
'''
Atomically adds `delta` to the current value and returns the old value.
:param delta: The delta to add.
'''
with self._reference.get_lock():
oldval = self._reference.value
self._reference.value += delta
return oldval
def subtract_and_get(self, delta):
'''
Atomically subtracts `delta` from the current value.
:param delta: The delta to subtract.
'''
with self._reference.get_lock():
self._reference.value -= delta
return self._reference.value
|
joesecurity/jbxapi
|
setup.py
|
get_version
|
python
|
def get_version():
here = os.path.abspath(os.path.dirname(__file__))
jbxapi_file = os.path.join(here, "jbxapi.py")
with open(jbxapi_file) as f:
content = f.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
if not match:
raise RuntimeError("Unable to find version string.")
return match.group(1)
|
Extract the version number from the code.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/setup.py#L7-L18
| null |
import re
import os
from setuptools import setup
setup(name='jbxapi',
version=get_version(),
description='API for Joe Sandbox',
url='https://github.com/joesecurity/joesandboxcloudapi',
author='Joe Security LLC',
license='MIT',
py_modules=['jbxapi'],
install_requires=[
'requests>=2.18.4,<3',
],
entry_points={
'console_scripts': [
'jbxapi=jbxapi:main'
],
},
zip_safe=False,
keywords="security sandbox joe",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Security',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
])
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.analysis_list
|
python
|
def analysis_list(self):
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Fetch a list of all analyses.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L158-L164
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.submit_sample
|
python
|
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
|
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L166-L202
|
[
"def _submit(self, params, files=None, _extra_params={}):\n data = copy.copy(submission_defaults)\n data.update(params)\n data = self._prepare_params_for_submission(data)\n data.update(_extra_params)\n\n response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)\n\n return self._raise_or_extract(response)\n",
"def _check_user_parameters(self, user_parameters):\n \"\"\"\n Verifies that the parameter dict given by the user only contains\n known keys. This ensures that the user detects typos faster.\n \"\"\"\n if not user_parameters:\n return\n\n # sanity check against typos\n for key in user_parameters:\n if key not in submission_defaults:\n raise ValueError(\"Unknown parameter {0}\".format(key))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
    """
    List the webids of the analyses matching the given query.

    The search covers MD5, SHA1, SHA256, filename, cookbook name, comment,
    url and report id.
    """
    payload = {'apikey': self.apikey, 'q': query}
    return self._raise_or_extract(
        self._post(self.apiurl + "/v2/analysis/search", data=payload))
def server_systems(self):
    """Retrieve the list of analysis systems offered by the server."""
    return self._raise_or_extract(
        self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey}))
def account_info(self):
    """
    Show information about the account.

    Only available on Joe Sandbox Cloud.
    """
    payload = {'apikey': self.apikey}
    return self._raise_or_extract(
        self._post(self.apiurl + "/v2/account/info", data=payload))
def server_info(self):
    """Query general information about the server."""
    payload = {'apikey': self.apikey}
    return self._raise_or_extract(
        self._post(self.apiurl + "/v2/server/info", data=payload))
def server_lia_countries(self):
    """Show the available localized internet anonymization countries."""
    payload = {'apikey': self.apikey}
    return self._raise_or_extract(
        self._post(self.apiurl + "/v2/server/lia_countries", data=payload))
def server_languages_and_locales(self):
    """Show the available languages and locales."""
    payload = {'apikey': self.apikey}
    return self._raise_or_extract(
        self._post(self.apiurl + "/v2/server/languages_and_locales", data=payload))
def _post(self, url, data=None, **kwargs):
    """
    Wrapper around requests.post which
    (a) always inserts a timeout
    (b) converts errors to ConnectionError
    (c) re-tries a few times
    (d) converts file names to ASCII
    """
    # Remove non-ASCII characters from filenames due to a limitation of the combination of
    # urllib3 (via python-requests) and our server
    # https://github.com/requests/requests/issues/2117
    # Internal Ticket #3090
    if "files" in kwargs and kwargs["files"] is not None:
        acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
                           "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
        for param_name, fp in kwargs["files"].items():
            # A value may be a bare file object or a (filename, fileobj) pair.
            if isinstance(fp, (tuple, list)):
                filename, fp = fp
            else:
                filename = requests.utils.guess_filename(fp) or param_name

            # Replace every character outside the whitelist with "x" + its
            # two-digit hex code so the server receives pure ASCII.
            # NOTE(review): the UnicodeDecodeError guard looks like a
            # Python 2 leftover (bytes/unicode comparison); under Python 3
            # the membership test should not raise -- confirm before removing.
            def encode(char):
                try:
                    if char in acceptable_chars:
                        return char
                except UnicodeDecodeError:
                    pass
                return "x{:02x}".format(ord(char))
            filename = "".join(encode(x) for x in filename)

            # Re-pack as (filename, fileobj) so requests uses the sanitized name.
            kwargs["files"][param_name] = (filename, fp)

    # Retry loop: only timeouts are retried (up to self.retries attempts);
    # any other request error is converted to ConnectionError immediately.
    for i in itertools.count(1):
        try:
            return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
        except requests.exceptions.Timeout as e:
            # exhausted all retries
            if i >= self.retries:
                raise ConnectionError(e)
        except requests.exceptions.RequestException as e:
            raise ConnectionError(e)

        # exponential backoff
        max_backoff = 4 ** i / 10  # .4, 1.6, 6.4, 25.6, ...
        time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.submit_sample_url
|
python
|
def submit_sample_url(self, url, params={}, _extra_params={}):
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
|
Submit a sample at a given URL for analysis.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L204-L211
|
[
"def _submit(self, params, files=None, _extra_params={}):\n data = copy.copy(submission_defaults)\n data.update(params)\n data = self._prepare_params_for_submission(data)\n data.update(_extra_params)\n\n response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)\n\n return self._raise_or_extract(response)\n",
"def _check_user_parameters(self, user_parameters):\n \"\"\"\n Verifies that the parameter dict given by the user only contains\n known keys. This ensures that the user detects typos faster.\n \"\"\"\n if not user_parameters:\n return\n\n # sanity check against typos\n for key in user_parameters:\n if key not in submission_defaults:\n raise ValueError(\"Unknown parameter {0}\".format(key))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When omitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.submit_url
|
python
|
def submit_url(self, url, params={}, _extra_params={}):
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
|
Submit a website for analysis.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L213-L220
|
[
"def _submit(self, params, files=None, _extra_params={}):\n data = copy.copy(submission_defaults)\n data.update(params)\n data = self._prepare_params_for_submission(data)\n data.update(_extra_params)\n\n response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)\n\n return self._raise_or_extract(response)\n",
"def _check_user_parameters(self, user_parameters):\n \"\"\"\n Verifies that the parameter dict given by the user only contains\n known keys. This ensures that the user detects typos faster.\n \"\"\"\n if not user_parameters:\n return\n\n # sanity check against typos\n for key in user_parameters:\n if key not in submission_defaults:\n raise ValueError(\"Unknown parameter {0}\".format(key))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When omitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.submit_cookbook
|
python
|
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
|
Submit a cookbook.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L222-L228
|
[
"def _submit(self, params, files=None, _extra_params={}):\n data = copy.copy(submission_defaults)\n data.update(params)\n data = self._prepare_params_for_submission(data)\n data.update(_extra_params)\n\n response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)\n\n return self._raise_or_extract(response)\n",
"def _check_user_parameters(self, user_parameters):\n \"\"\"\n Verifies that the parameter dict given by the user only contains\n known keys. This ensures that the user detects typos faster.\n \"\"\"\n if not user_parameters:\n return\n\n # sanity check against typos\n for key in user_parameters:\n if key not in submission_defaults:\n raise ValueError(\"Unknown parameter {0}\".format(key))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.submission_delete
|
python
|
def submission_delete(self, submission_id):
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
|
Delete a submission.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L271-L277
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.server_online
|
python
|
def server_online(self):
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L279-L285
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.analysis_info
|
python
|
def analysis_info(self, webid):
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
|
Show the status and most important attributes of an analysis.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L287-L293
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.analysis_download
|
python
|
def analysis_download(self, webid, type, run=None, file=None):
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
|
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L303-L363
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.analysis_search
|
python
|
def analysis_search(self, query):
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
|
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L365-L373
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.server_systems
|
python
|
def server_systems(self):
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Retrieve a list of available systems.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L375-L381
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.account_info
|
python
|
def account_info(self):
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Only available on Joe Sandbox Cloud
Show information about the account.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L383-L391
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.server_info
|
python
|
def server_info(self):
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Query information about the server.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L393-L399
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.server_lia_countries
|
python
|
def server_lia_countries(self):
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Show the available localized internet anonymization countries.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L401-L407
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox.server_languages_and_locales
|
python
|
def server_languages_and_locales(self):
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
|
Show the available languages and locales
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L409-L415
|
[
"def _post(self, url, data=None, **kwargs):\n \"\"\"\n Wrapper around requests.post which\n\n (a) always inserts a timeout\n (b) converts errors to ConnectionError\n (c) re-tries a few times\n (d) converts file names to ASCII\n \"\"\"\n\n # Remove non-ASCII characters from filenames due to a limitation of the combination of\n # urllib3 (via python-requests) and our server\n # https://github.com/requests/requests/issues/2117\n # Internal Ticket #3090\n if \"files\" in kwargs and kwargs[\"files\"] is not None:\n acceptable_chars = \"0123456789\" + \"abcdefghijklmnopqrstuvwxyz\" + \\\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" + \" _-.,()[]{}\"\n for param_name, fp in kwargs[\"files\"].items():\n if isinstance(fp, (tuple, list)):\n filename, fp = fp\n else:\n filename = requests.utils.guess_filename(fp) or param_name\n\n def encode(char):\n try:\n if char in acceptable_chars:\n return char\n except UnicodeDecodeError:\n pass\n return \"x{:02x}\".format(ord(char))\n filename = \"\".join(encode(x) for x in filename)\n\n kwargs[\"files\"][param_name] = (filename, fp)\n\n for i in itertools.count(1):\n try:\n return self.session.post(url, data=data, timeout=self.timeout, **kwargs)\n except requests.exceptions.Timeout as e:\n # exhausted all retries\n if i >= self.retries:\n raise ConnectionError(e)\n except requests.exceptions.RequestException as e:\n raise ConnectionError(e)\n\n # exponential backoff\n max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...\n time.sleep(random.uniform(0, max_backoff))\n",
"def _raise_or_extract(self, response):\n \"\"\"\n Raises an exception if the response indicates an API error.\n\n Otherwise returns the object at the 'data' key of the API response.\n \"\"\"\n\n try:\n data = response.json()\n except ValueError:\n raise JoeException(\"The server responded with an unexpected format ({}). Is the API url correct?\". format(response.status_code))\n\n try:\n if response.ok:\n return data['data']\n else:\n error = data['errors'][0]\n raise ApiError(error)\n except (KeyError, TypeError):\n raise JoeException(\"Unexpected data ({}). Is the API url correct?\". format(response.status_code))\n"
] |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When obmitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
    """Show the available localized internet anonymization countries."""
    resp = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
    return self._raise_or_extract(resp)
def _post(self, url, data=None, **kwargs):
    """
    Wrapper around requests.post which
    (a) always inserts a timeout
    (b) converts errors to ConnectionError
    (c) re-tries a few times
    (d) converts file names to ASCII
    """
    # Remove non-ASCII characters from filenames due to a limitation of the combination of
    # urllib3 (via python-requests) and our server
    # https://github.com/requests/requests/issues/2117
    # Internal Ticket #3090
    if "files" in kwargs and kwargs["files"] is not None:
        acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
                           "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
        for param_name, fp in kwargs["files"].items():
            # Entries may be either a bare file object or a (filename, file) pair.
            if isinstance(fp, (tuple, list)):
                filename, fp = fp
            else:
                filename = requests.utils.guess_filename(fp) or param_name
            def encode(char):
                # Keep whitelisted characters; hex-escape everything else as "xNN".
                try:
                    if char in acceptable_chars:
                        return char
                except UnicodeDecodeError:
                    pass
                return "x{:02x}".format(ord(char))
            filename = "".join(encode(x) for x in filename)
            # Re-pack so requests sends the sanitized filename.
            kwargs["files"][param_name] = (filename, fp)
    for i in itertools.count(1):
        try:
            return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
        except requests.exceptions.Timeout as e:
            # exhausted all retries
            if i >= self.retries:
                raise ConnectionError(e)
        except requests.exceptions.RequestException as e:
            # non-timeout transport errors are not retried
            raise ConnectionError(e)
        # exponential backoff
        max_backoff = 4 ** i / 10  # .4, 1.6, 6.4, 25.6, ...
        time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
def _raise_or_extract(self, response):
"""
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
"""
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox._post
|
python
|
def _post(self, url, data=None, **kwargs):
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
|
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L417-L463
| null |
class JoeSandbox(object):
    """
    Client for the Joe Sandbox v2 web API.

    NOTE(review): this excerpt does not define the HTTP helper ``_post``;
    it is expected to be provided elsewhere in the module.
    """

    def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
        """
        Create a JoeSandbox object.

        Parameters:
          apikey:     the api key
          apiurl:     the api url
          accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
                      https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
          timeout:    Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
          verify_ssl: Enable or disable checking SSL certificates.
          retries:    Number of times requests should be retried if they timeout.
          proxies:    Proxy settings, see the requests library for more information:
                      http://docs.python-requests.org/en/master/user/advanced/#proxies
        """
        self.apikey = apikey
        self.apiurl = apiurl.rstrip("/")
        self.accept_tac = accept_tac
        self.timeout = timeout
        self.retries = retries

        self.session = requests.Session()
        self.session.verify = verify_ssl
        self.session.proxies = proxies
        self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})

    def analysis_list(self):
        """
        Fetch a list of all analyses.
        """
        response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def submit_sample(self, sample, cookbook=None, params=None, _extra_params=None):
        """
        Submit a sample and returns the submission id.

        Parameters:
          sample:   The sample to submit. Needs to be a file-like object or a tuple in
                    the shape (filename, file-like object).
          cookbook: Uploads a cookbook together with the sample. Needs to be a file-like
                    object or a tuple in the shape (filename, file-like object)
          params:   Customize the sandbox parameters. They are described in more detail
                    in the default submission parameters.

        Example:

            import jbxapi
            joe = jbxapi.JoeSandbox()
            with open("sample.exe", "rb") as f:
                joe.submit_sample(f, params={"systems": ["w7"]})

        Example:

            import io, jbxapi
            joe = jbxapi.JoeSandbox()
            cookbook = io.BytesIO(b"cookbook content")
            with open("sample.exe", "rb") as f:
                joe.submit_sample(f, cookbook=cookbook)
        """
        # NOTE: params/_extra_params default to None instead of {} to avoid the
        # shared-mutable-default-argument pitfall; None behaves like "no params".
        self._check_user_parameters(params)
        files = {'sample': sample}
        if cookbook:
            files['cookbook'] = cookbook
        return self._submit(params, files, _extra_params=_extra_params)

    def submit_sample_url(self, url, params=None, _extra_params=None):
        """
        Submit a sample at a given URL for analysis.
        """
        self._check_user_parameters(params)
        # copy before mutating so the caller's dict is left untouched
        params = dict(params) if params else {}
        params['sample-url'] = url
        return self._submit(params, _extra_params=_extra_params)

    def submit_url(self, url, params=None, _extra_params=None):
        """
        Submit a website for analysis.
        """
        self._check_user_parameters(params)
        # copy before mutating so the caller's dict is left untouched
        params = dict(params) if params else {}
        params['url'] = url
        return self._submit(params, _extra_params=_extra_params)

    def submit_cookbook(self, cookbook, params=None, _extra_params=None):
        """
        Submit a cookbook.
        """
        self._check_user_parameters(params)
        files = {'cookbook': cookbook}
        return self._submit(params, files, _extra_params=_extra_params)

    def _prepare_params_for_submission(self, params):
        """Translate the user-facing parameter dict into the wire format.

        Mutates and returns *params*: injects the api key and TAC flag,
        renames list parameters and serializes booleans as "0"/"1".
        """
        params['apikey'] = self.apikey
        params['accept-tac'] = "1" if self.accept_tac else "0"

        # rename array parameters
        params['systems[]'] = params.pop('systems', None)
        params['tags[]'] = params.pop('tags', None)

        # submit booleans as "0" and "1"
        for key, value in params.items():
            try:
                default = submission_defaults[key]
            except KeyError:
                continue
            if default is True or default is False or default is UnsetBool:
                if value is None or value is UnsetBool:
                    params[key] = None
                else:
                    params[key] = "1" if value else "0"

        return params

    def _submit(self, params, files=None, _extra_params=None):
        # Start from the defaults and layer the caller's settings on top.
        data = copy.copy(submission_defaults)
        if params:
            data.update(params)
        data = self._prepare_params_for_submission(data)
        if _extra_params:
            data.update(_extra_params)
        response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
        return self._raise_or_extract(response)

    def submission_info(self, submission_id):
        """
        Returns information about a submission including all the analysis ids.
        """
        response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
        return self._raise_or_extract(response)

    def submission_delete(self, submission_id):
        """
        Delete a submission.
        """
        response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
        return self._raise_or_extract(response)

    def server_online(self):
        """
        Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
        """
        response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def analysis_info(self, webid):
        """
        Show the status and most important attributes of an analysis.
        """
        response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
        return self._raise_or_extract(response)

    def analysis_delete(self, webid):
        """
        Delete an analysis.
        """
        response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
        return self._raise_or_extract(response)

    def analysis_download(self, webid, type, run=None, file=None):
        """
        Download a resource for an analysis. E.g. the full report, binaries, screenshots.
        The full list of resources can be found in our API documentation.

        When `file` is given, the return value is the filename specified by the server,
        otherwise it's a tuple of (filename, bytes).

        Parameters:
            webid: the webid of the analysis
            type: the report type, e.g. 'html', 'bins'
            run: specify the run. If it is None, let Joe Sandbox pick one
            file: a writeable file-like object (When omitted, the method returns
                  the data as a bytes object.)

        Example:

            json_report, name = joe.analysis_download(123456, 'jsonfixed')

        Example:

            with open("full_report.html", "wb") as f:
                name = joe.analysis_download(123456, "html", file=f)
        """
        # when no file is specified, we create our own
        if file is None:
            _file = io.BytesIO()
        else:
            _file = file

        data = {
            'apikey': self.apikey,
            'webid': webid,
            'type': type,
            'run': run,
        }

        response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)

        try:
            filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
        except Exception:
            # header missing or malformed: fall back to the resource type
            filename = type

        # do standard error handling when encountering an error (i.e. throw an exception)
        if not response.ok:
            self._raise_or_extract(response)
            raise RuntimeError("Unreachable because statement above should raise.")

        try:
            for chunk in response.iter_content(1024):
                _file.write(chunk)
        except requests.exceptions.RequestException as e:
            raise ConnectionError(e)

        # no user file means we return the content
        if file is None:
            return (filename, _file.getvalue())
        else:
            return filename

    def analysis_search(self, query):
        """
        Lists the webids of the analyses that match the given query.

        Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
        """
        response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
        return self._raise_or_extract(response)

    def server_systems(self):
        """
        Retrieve a list of available systems.
        """
        response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def account_info(self):
        """
        Only available on Joe Sandbox Cloud

        Show information about the account.
        """
        response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def server_info(self):
        """
        Query information about the server.
        """
        response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def server_lia_countries(self):
        """
        Show the available localized internet anonymization countries.
        """
        response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def server_languages_and_locales(self):
        """
        Show the available languages and locales
        """
        response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def _check_user_parameters(self, user_parameters):
        """
        Verifies that the parameter dict given by the user only contains
        known keys. This ensures that the user detects typos faster.
        """
        if not user_parameters:
            return

        # sanity check against typos
        for key in user_parameters:
            if key not in submission_defaults:
                raise ValueError("Unknown parameter {0}".format(key))

    def _raise_or_extract(self, response):
        """
        Raises an exception if the response indicates an API error.

        Otherwise returns the object at the 'data' key of the API response.
        """
        try:
            data = response.json()
        except ValueError:
            raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?".format(response.status_code))

        try:
            if response.ok:
                return data['data']
            else:
                error = data['errors'][0]
                raise ApiError(error)
        except (KeyError, TypeError):
            raise JoeException("Unexpected data ({}). Is the API url correct?".format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox._check_user_parameters
|
python
|
def _check_user_parameters(self, user_parameters):
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
|
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L465-L476
| null |
class JoeSandbox(object):
    """
    Client for the Joe Sandbox v2 web API.

    NOTE(review): this excerpt does not define ``_check_user_parameters``;
    it is expected to be provided elsewhere in the module.
    """

    def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
        """
        Create a JoeSandbox object.

        Parameters:
          apikey:     the api key
          apiurl:     the api url
          accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
                      https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
          timeout:    Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
          verify_ssl: Enable or disable checking SSL certificates.
          retries:    Number of times requests should be retried if they timeout.
          proxies:    Proxy settings, see the requests library for more information:
                      http://docs.python-requests.org/en/master/user/advanced/#proxies
        """
        self.apikey = apikey
        self.apiurl = apiurl.rstrip("/")
        self.accept_tac = accept_tac
        self.timeout = timeout
        self.retries = retries

        self.session = requests.Session()
        self.session.verify = verify_ssl
        self.session.proxies = proxies
        self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})

    def analysis_list(self):
        """
        Fetch a list of all analyses.
        """
        response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def submit_sample(self, sample, cookbook=None, params=None, _extra_params=None):
        """
        Submit a sample and returns the submission id.

        Parameters:
          sample:   The sample to submit. Needs to be a file-like object or a tuple in
                    the shape (filename, file-like object).
          cookbook: Uploads a cookbook together with the sample. Needs to be a file-like
                    object or a tuple in the shape (filename, file-like object)
          params:   Customize the sandbox parameters. They are described in more detail
                    in the default submission parameters.

        Example:

            import jbxapi
            joe = jbxapi.JoeSandbox()
            with open("sample.exe", "rb") as f:
                joe.submit_sample(f, params={"systems": ["w7"]})

        Example:

            import io, jbxapi
            joe = jbxapi.JoeSandbox()
            cookbook = io.BytesIO(b"cookbook content")
            with open("sample.exe", "rb") as f:
                joe.submit_sample(f, cookbook=cookbook)
        """
        # NOTE: params/_extra_params default to None instead of {} to avoid the
        # shared-mutable-default-argument pitfall; None behaves like "no params".
        self._check_user_parameters(params)
        files = {'sample': sample}
        if cookbook:
            files['cookbook'] = cookbook
        return self._submit(params, files, _extra_params=_extra_params)

    def submit_sample_url(self, url, params=None, _extra_params=None):
        """
        Submit a sample at a given URL for analysis.
        """
        self._check_user_parameters(params)
        # copy before mutating so the caller's dict is left untouched
        params = dict(params) if params else {}
        params['sample-url'] = url
        return self._submit(params, _extra_params=_extra_params)

    def submit_url(self, url, params=None, _extra_params=None):
        """
        Submit a website for analysis.
        """
        self._check_user_parameters(params)
        # copy before mutating so the caller's dict is left untouched
        params = dict(params) if params else {}
        params['url'] = url
        return self._submit(params, _extra_params=_extra_params)

    def submit_cookbook(self, cookbook, params=None, _extra_params=None):
        """
        Submit a cookbook.
        """
        self._check_user_parameters(params)
        files = {'cookbook': cookbook}
        return self._submit(params, files, _extra_params=_extra_params)

    def _prepare_params_for_submission(self, params):
        """Translate the user-facing parameter dict into the wire format.

        Mutates and returns *params*: injects the api key and TAC flag,
        renames list parameters and serializes booleans as "0"/"1".
        """
        params['apikey'] = self.apikey
        params['accept-tac'] = "1" if self.accept_tac else "0"

        # rename array parameters
        params['systems[]'] = params.pop('systems', None)
        params['tags[]'] = params.pop('tags', None)

        # submit booleans as "0" and "1"
        for key, value in params.items():
            try:
                default = submission_defaults[key]
            except KeyError:
                continue
            if default is True or default is False or default is UnsetBool:
                if value is None or value is UnsetBool:
                    params[key] = None
                else:
                    params[key] = "1" if value else "0"

        return params

    def _submit(self, params, files=None, _extra_params=None):
        # Start from the defaults and layer the caller's settings on top.
        data = copy.copy(submission_defaults)
        if params:
            data.update(params)
        data = self._prepare_params_for_submission(data)
        if _extra_params:
            data.update(_extra_params)
        response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
        return self._raise_or_extract(response)

    def submission_info(self, submission_id):
        """
        Returns information about a submission including all the analysis ids.
        """
        response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
        return self._raise_or_extract(response)

    def submission_delete(self, submission_id):
        """
        Delete a submission.
        """
        response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
        return self._raise_or_extract(response)

    def server_online(self):
        """
        Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
        """
        response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def analysis_info(self, webid):
        """
        Show the status and most important attributes of an analysis.
        """
        response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
        return self._raise_or_extract(response)

    def analysis_delete(self, webid):
        """
        Delete an analysis.
        """
        response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
        return self._raise_or_extract(response)

    def analysis_download(self, webid, type, run=None, file=None):
        """
        Download a resource for an analysis. E.g. the full report, binaries, screenshots.
        The full list of resources can be found in our API documentation.

        When `file` is given, the return value is the filename specified by the server,
        otherwise it's a tuple of (filename, bytes).

        Parameters:
            webid: the webid of the analysis
            type: the report type, e.g. 'html', 'bins'
            run: specify the run. If it is None, let Joe Sandbox pick one
            file: a writeable file-like object (When omitted, the method returns
                  the data as a bytes object.)

        Example:

            json_report, name = joe.analysis_download(123456, 'jsonfixed')

        Example:

            with open("full_report.html", "wb") as f:
                name = joe.analysis_download(123456, "html", file=f)
        """
        # when no file is specified, we create our own
        if file is None:
            _file = io.BytesIO()
        else:
            _file = file

        data = {
            'apikey': self.apikey,
            'webid': webid,
            'type': type,
            'run': run,
        }

        response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)

        try:
            filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
        except Exception:
            # header missing or malformed: fall back to the resource type
            filename = type

        # do standard error handling when encountering an error (i.e. throw an exception)
        if not response.ok:
            self._raise_or_extract(response)
            raise RuntimeError("Unreachable because statement above should raise.")

        try:
            for chunk in response.iter_content(1024):
                _file.write(chunk)
        except requests.exceptions.RequestException as e:
            raise ConnectionError(e)

        # no user file means we return the content
        if file is None:
            return (filename, _file.getvalue())
        else:
            return filename

    def analysis_search(self, query):
        """
        Lists the webids of the analyses that match the given query.

        Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
        """
        response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
        return self._raise_or_extract(response)

    def server_systems(self):
        """
        Retrieve a list of available systems.
        """
        response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def account_info(self):
        """
        Only available on Joe Sandbox Cloud

        Show information about the account.
        """
        response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def server_info(self):
        """
        Query information about the server.
        """
        response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def server_lia_countries(self):
        """
        Show the available localized internet anonymization countries.
        """
        response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def server_languages_and_locales(self):
        """
        Show the available languages and locales
        """
        response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
        return self._raise_or_extract(response)

    def _post(self, url, data=None, **kwargs):
        """
        Wrapper around requests.post which
        (a) always inserts a timeout
        (b) converts errors to ConnectionError
        (c) re-tries a few times
        (d) converts file names to ASCII
        """
        # Remove non-ASCII characters from filenames due to a limitation of the combination of
        # urllib3 (via python-requests) and our server
        # https://github.com/requests/requests/issues/2117
        # Internal Ticket #3090
        if "files" in kwargs and kwargs["files"] is not None:
            acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
                               "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
            for param_name, fp in kwargs["files"].items():
                # Entries may be either a bare file object or a (filename, file) pair.
                if isinstance(fp, (tuple, list)):
                    filename, fp = fp
                else:
                    filename = requests.utils.guess_filename(fp) or param_name
                def encode(char):
                    # Keep whitelisted characters; hex-escape everything else as "xNN".
                    try:
                        if char in acceptable_chars:
                            return char
                    except UnicodeDecodeError:
                        pass
                    return "x{:02x}".format(ord(char))
                filename = "".join(encode(x) for x in filename)
                kwargs["files"][param_name] = (filename, fp)

        for i in itertools.count(1):
            try:
                return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
            except requests.exceptions.Timeout as e:
                # exhausted all retries
                if i >= self.retries:
                    raise ConnectionError(e)
            except requests.exceptions.RequestException as e:
                raise ConnectionError(e)

            # exponential backoff
            max_backoff = 4 ** i / 10  # .4, 1.6, 6.4, 25.6, ...
            time.sleep(random.uniform(0, max_backoff))

    def _raise_or_extract(self, response):
        """
        Raises an exception if the response indicates an API error.

        Otherwise returns the object at the 'data' key of the API response.
        """
        try:
            data = response.json()
        except ValueError:
            raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?".format(response.status_code))

        try:
            if response.ok:
                return data['data']
            else:
                error = data['errors'][0]
                raise ApiError(error)
        except (KeyError, TypeError):
            raise JoeException("Unexpected data ({}). Is the API url correct?".format(response.status_code))
|
joesecurity/jbxapi
|
jbxapi.py
|
JoeSandbox._raise_or_extract
|
python
|
def _raise_or_extract(self, response):
try:
data = response.json()
except ValueError:
raise JoeException("The server responded with an unexpected format ({}). Is the API url correct?". format(response.status_code))
try:
if response.ok:
return data['data']
else:
error = data['errors'][0]
raise ApiError(error)
except (KeyError, TypeError):
raise JoeException("Unexpected data ({}). Is the API url correct?". format(response.status_code))
|
Raises an exception if the response indicates an API error.
Otherwise returns the object at the 'data' key of the API response.
|
train
|
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L478-L497
| null |
class JoeSandbox(object):
def __init__(self, apikey=API_KEY, apiurl=API_URL, accept_tac=ACCEPT_TAC, timeout=None, verify_ssl=True, retries=3, proxies=None):
"""
Create a JoeSandbox object.
Parameters:
apikey: the api key
apiurl: the api url
accept_tac: Joe Sandbox Cloud requires accepting the Terms and Conditions.
https://jbxcloud.joesecurity.org/resources/termsandconditions.pdf
timeout: Timeout in seconds for accessing the API. Raises a ConnectionError on timeout.
verify_ssl: Enable or disable checking SSL certificates.
retries: Number of times requests should be retried if they timeout.
proxies: Proxy settings, see the requests library for more information:
http://docs.python-requests.org/en/master/user/advanced/#proxies
"""
self.apikey = apikey
self.apiurl = apiurl.rstrip("/")
self.accept_tac = accept_tac
self.timeout = timeout
self.retries = retries
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.proxies = proxies
self.session.headers.update({"User-Agent": "jbxapi.py {}".format(__version__)})
def analysis_list(self):
"""
Fetch a list of all analyses.
"""
response = self._post(self.apiurl + '/v2/analysis/list', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def submit_sample(self, sample, cookbook=None, params={}, _extra_params={}):
"""
Submit a sample and returns the submission id.
Parameters:
sample: The sample to submit. Needs to be a file-like object or a tuple in
the shape (filename, file-like object).
cookbook: Uploads a cookbook together with the sample. Needs to be a file-like object or a
tuple in the shape (filename, file-like object)
params: Customize the sandbox parameters. They are described in more detail
in the default submission parameters.
Example:
import jbxapi
joe = jbxapi.JoeSandbox()
with open("sample.exe", "rb") as f:
joe.submit_sample(f, params={"systems": ["w7"]})
Example:
import io, jbxapi
joe = jbxapi.JoeSandbox()
cookbook = io.BytesIO(b"cookbook content")
with open("sample.exe", "rb") as f:
joe.submit_sample(f, cookbook=cookbook)
"""
self._check_user_parameters(params)
files = {'sample': sample}
if cookbook:
files['cookbook'] = cookbook
return self._submit(params, files, _extra_params=_extra_params)
def submit_sample_url(self, url, params={}, _extra_params={}):
"""
Submit a sample at a given URL for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['sample-url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_url(self, url, params={}, _extra_params={}):
"""
Submit a website for analysis.
"""
self._check_user_parameters(params)
params = copy.copy(params)
params['url'] = url
return self._submit(params, _extra_params=_extra_params)
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
"""
Submit a cookbook.
"""
self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
def _prepare_params_for_submission(self, params):
params['apikey'] = self.apikey
params['accept-tac'] = "1" if self.accept_tac else "0"
# rename array parameters
params['systems[]'] = params.pop('systems', None)
params['tags[]'] = params.pop('tags', None)
# submit booleans as "0" and "1"
for key, value in params.items():
try:
default = submission_defaults[key]
except KeyError:
continue
if default is True or default is False or default is UnsetBool:
if value is None or value is UnsetBool:
params[key] = None
else:
params[key] = "1" if value else "0"
return params
def _submit(self, params, files=None, _extra_params={}):
data = copy.copy(submission_defaults)
data.update(params)
data = self._prepare_params_for_submission(data)
data.update(_extra_params)
response = self._post(self.apiurl + '/v2/submission/new', data=data, files=files)
return self._raise_or_extract(response)
def submission_info(self, submission_id):
"""
Returns information about a submission including all the analysis ids.
"""
response = self._post(self.apiurl + '/v2/submission/info', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def submission_delete(self, submission_id):
"""
Delete a submission.
"""
response = self._post(self.apiurl + '/v2/submission/delete', data={'apikey': self.apikey, 'submission_id': submission_id})
return self._raise_or_extract(response)
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response)
def analysis_info(self, webid):
"""
Show the status and most important attributes of an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/info", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_delete(self, webid):
"""
Delete an analysis.
"""
response = self._post(self.apiurl + "/v2/analysis/delete", data={'apikey': self.apikey, 'webid': webid})
return self._raise_or_extract(response)
def analysis_download(self, webid, type, run=None, file=None):
"""
Download a resource for an analysis. E.g. the full report, binaries, screenshots.
The full list of resources can be found in our API documentation.
When `file` is given, the return value is the filename specified by the server,
otherwise it's a tuple of (filename, bytes).
Parameters:
webid: the webid of the analysis
type: the report type, e.g. 'html', 'bins'
run: specify the run. If it is None, let Joe Sandbox pick one
file: a writeable file-like object (When omitted, the method returns
the data as a bytes object.)
Example:
json_report, name = joe.analysis_download(123456, 'jsonfixed')
Example:
with open("full_report.html", "wb") as f:
name = joe.analysis_download(123456, "html", file=f)
"""
# when no file is specified, we create our own
if file is None:
_file = io.BytesIO()
else:
_file = file
data = {
'apikey': self.apikey,
'webid': webid,
'type': type,
'run': run,
}
response = self._post(self.apiurl + "/v2/analysis/download", data=data, stream=True)
try:
filename = response.headers["Content-Disposition"].split("filename=")[1][1:-2]
except Exception as e:
filename = type
# do standard error handling when encountering an error (i.e. throw an exception)
if not response.ok:
self._raise_or_extract(response)
raise RuntimeError("Unreachable because statement above should raise.")
try:
for chunk in response.iter_content(1024):
_file.write(chunk)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# no user file means we return the content
if file is None:
return (filename, _file.getvalue())
else:
return filename
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response)
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def account_info(self):
"""
Only available on Joe Sandbox Cloud
Show information about the account.
"""
response = self._post(self.apiurl + "/v2/account/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_info(self):
"""
Query information about the server.
"""
response = self._post(self.apiurl + "/v2/server/info", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_lia_countries(self):
"""
Show the available localized internet anonymization countries.
"""
response = self._post(self.apiurl + "/v2/server/lia_countries", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def server_languages_and_locales(self):
"""
Show the available languages and locales
"""
response = self._post(self.apiurl + "/v2/server/languages_and_locales", data={'apikey': self.apikey})
return self._raise_or_extract(response)
def _post(self, url, data=None, **kwargs):
"""
Wrapper around requests.post which
(a) always inserts a timeout
(b) converts errors to ConnectionError
(c) re-tries a few times
(d) converts file names to ASCII
"""
# Remove non-ASCII characters from filenames due to a limitation of the combination of
# urllib3 (via python-requests) and our server
# https://github.com/requests/requests/issues/2117
# Internal Ticket #3090
if "files" in kwargs and kwargs["files"] is not None:
acceptable_chars = "0123456789" + "abcdefghijklmnopqrstuvwxyz" + \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" + " _-.,()[]{}"
for param_name, fp in kwargs["files"].items():
if isinstance(fp, (tuple, list)):
filename, fp = fp
else:
filename = requests.utils.guess_filename(fp) or param_name
def encode(char):
try:
if char in acceptable_chars:
return char
except UnicodeDecodeError:
pass
return "x{:02x}".format(ord(char))
filename = "".join(encode(x) for x in filename)
kwargs["files"][param_name] = (filename, fp)
for i in itertools.count(1):
try:
return self.session.post(url, data=data, timeout=self.timeout, **kwargs)
except requests.exceptions.Timeout as e:
# exhausted all retries
if i >= self.retries:
raise ConnectionError(e)
except requests.exceptions.RequestException as e:
raise ConnectionError(e)
# exponential backoff
max_backoff = 4 ** i / 10 # .4, 1.6, 6.4, 25.6, ...
time.sleep(random.uniform(0, max_backoff))
def _check_user_parameters(self, user_parameters):
"""
Verifies that the parameter dict given by the user only contains
known keys. This ensures that the user detects typos faster.
"""
if not user_parameters:
return
# sanity check against typos
for key in user_parameters:
if key not in submission_defaults:
raise ValueError("Unknown parameter {0}".format(key))
|
WoLpH/python-statsd
|
statsd/connection.py
|
Connection.send
|
python
|
def send(self, data, sample_rate=None):
'''Send the data over UDP while taking the sample_rate in account
The sample rate should be a number between `0` and `1` which indicates
the probability that a message will be sent. The sample_rate is also
communicated to `statsd` so it knows what multiplier to use.
:keyword data: The data to send
:type data: dict
:keyword sample_rate: The sample rate, defaults to `1` (meaning always)
:type sample_rate: int
'''
if self._disabled:
self.logger.debug('Connection disabled, not sending data')
return False
if sample_rate is None:
sample_rate = self._sample_rate
sampled_data = {}
if sample_rate < 1:
if random.random() <= sample_rate:
# Modify the data so statsd knows our sample_rate
for stat, value in compat.iter_dict(data):
sampled_data[stat] = '%s|@%s' % (data[stat], sample_rate)
else:
sampled_data = data
try:
for stat, value in compat.iter_dict(sampled_data):
send_data = ('%s:%s' % (stat, value)).encode("utf-8")
self.udp_sock.send(send_data)
return True
except Exception as e:
self.logger.exception('unexpected error %r while sending data', e)
return False
|
Send the data over UDP while taking the sample_rate in account
The sample rate should be a number between `0` and `1` which indicates
the probability that a message will be sent. The sample_rate is also
communicated to `statsd` so it knows what multiplier to use.
:keyword data: The data to send
:type data: dict
:keyword sample_rate: The sample rate, defaults to `1` (meaning always)
:type sample_rate: int
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/connection.py#L47-L81
|
[
"def iter_dict(dict_): # pragma: no cover\n if PY3K:\n return dict_.items()\n else:\n return dict_.iteritems()\n"
] |
class Connection(object):
'''Statsd Connection
:keyword host: The statsd host to connect to, defaults to `localhost`
:type host: str
:keyword port: The statsd port to connect to, defaults to `8125`
:type port: int
:keyword sample_rate: The sample rate, defaults to `1` (meaning always)
:type sample_rate: int
:keyword disabled: Turn off sending UDP packets, defaults to ``False``
:type disabled: bool
'''
default_host = 'localhost'
default_port = 8125
default_sample_rate = 1
default_disabled = False
@classmethod
def set_defaults(
cls, host='localhost', port=8125, sample_rate=1, disabled=False):
cls.default_host = host
cls.default_port = port
cls.default_sample_rate = sample_rate
cls.default_disabled = disabled
def __init__(self, host=None, port=None, sample_rate=None, disabled=None):
self._host = host or self.default_host
self._port = int(port or self.default_port)
self._sample_rate = sample_rate or self.default_sample_rate
self._disabled = disabled or self.default_disabled
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp_sock.connect((self._host, self._port))
self.logger.debug(
'Initialized connection to %s:%d with P(%.1f)',
self._host, self._port, self._sample_rate)
def __del__(self):
'''
We close UDP socket connection explicitly for pypy.
'''
self.udp_sock.close() # pragma: no cover
def __repr__(self):
return '<%s[%s:%d] P(%.1f)>' % (
self.__class__.__name__,
self._host,
self._port,
self._sample_rate,
)
|
WoLpH/python-statsd
|
statsd/timer.py
|
Timer.start
|
python
|
def start(self):
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
|
Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L38-L48
| null |
class Timer(statsd.Client):
'''
Statsd Timer Object
Additional documentation is available at the parent class
:class:`~statsd.client.Client`
:keyword name: The name for this timer
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
:keyword min_send_threshold: Timings smaller than this will not be sent so
-1 can be used for all.
:type min_send_threshold: int
>>> timer = Timer('application_name').start()
>>> # do something
>>> timer.stop('executed_action')
True
'''
def __init__(self, name, connection=None, min_send_threshold=-1):
super(Timer, self).__init__(name, connection=connection)
self._start = None
self._last = None
self._stop = None
self.min_send_threshold = min_send_threshold
def send(self, subname, delta):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
def intermediate(self, subname):
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
def stop(self, subname='total'):
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
def __enter__(self):
'''
Make a context manager out of self to measure time execution in a block
of code.
:return: statsd.timer.Timer
'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Stop measuring time sending total metric, while exiting block of code.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
'''
self.stop()
def _decorate(self, name, function, class_=None):
class_ = class_ or Timer
@wraps(function)
def _decorator(*args, **kwargs):
timer = self.get_client(name, class_)
timer.start()
try:
return function(*args, **kwargs)
finally:
# Stop the timer, send the message and cleanup
timer.stop('')
return _decorator
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
@contextlib.contextmanager
def time(self, subname=None, class_=None):
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
WoLpH/python-statsd
|
statsd/timer.py
|
Timer.send
|
python
|
def send(self, subname, delta):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L50-L65
|
[
"def _get_name(cls, *name_parts):\n name_parts = [compat.to_str(x) for x in name_parts if x]\n return '.'.join(name_parts)\n",
"def _send(self, data):\n return self.connection.send(data)\n"
] |
class Timer(statsd.Client):
'''
Statsd Timer Object
Additional documentation is available at the parent class
:class:`~statsd.client.Client`
:keyword name: The name for this timer
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
:keyword min_send_threshold: Timings smaller than this will not be sent so
-1 can be used for all.
:type min_send_threshold: int
>>> timer = Timer('application_name').start()
>>> # do something
>>> timer.stop('executed_action')
True
'''
def __init__(self, name, connection=None, min_send_threshold=-1):
super(Timer, self).__init__(name, connection=connection)
self._start = None
self._last = None
self._stop = None
self.min_send_threshold = min_send_threshold
def start(self):
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
def intermediate(self, subname):
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
def stop(self, subname='total'):
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
def __enter__(self):
'''
Make a context manager out of self to measure time execution in a block
of code.
:return: statsd.timer.Timer
'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Stop measuring time sending total metric, while exiting block of code.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
'''
self.stop()
def _decorate(self, name, function, class_=None):
class_ = class_ or Timer
@wraps(function)
def _decorator(*args, **kwargs):
timer = self.get_client(name, class_)
timer.start()
try:
return function(*args, **kwargs)
finally:
# Stop the timer, send the message and cleanup
timer.stop('')
return _decorator
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
@contextlib.contextmanager
def time(self, subname=None, class_=None):
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
WoLpH/python-statsd
|
statsd/timer.py
|
Timer.intermediate
|
python
|
def intermediate(self, subname):
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
|
Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L67-L77
|
[
"def send(self, subname, delta):\n '''Send the data to statsd via self.connection\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n :keyword delta: The time delta (time.time() - time.time()) to report\n :type delta: float\n '''\n ms = delta * 1000\n if ms > self.min_send_threshold:\n name = self._get_name(self.name, subname)\n self.logger.info('%s: %0.08fms', name, ms)\n return statsd.Client._send(self, {name: '%0.08f|ms' % ms})\n else:\n return True\n"
] |
class Timer(statsd.Client):
'''
Statsd Timer Object
Additional documentation is available at the parent class
:class:`~statsd.client.Client`
:keyword name: The name for this timer
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
:keyword min_send_threshold: Timings smaller than this will not be sent so
-1 can be used for all.
:type min_send_threshold: int
>>> timer = Timer('application_name').start()
>>> # do something
>>> timer.stop('executed_action')
True
'''
def __init__(self, name, connection=None, min_send_threshold=-1):
super(Timer, self).__init__(name, connection=connection)
self._start = None
self._last = None
self._stop = None
self.min_send_threshold = min_send_threshold
def start(self):
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
def send(self, subname, delta):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
def stop(self, subname='total'):
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
def __enter__(self):
'''
Make a context manager out of self to measure time execution in a block
of code.
:return: statsd.timer.Timer
'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Stop measuring time sending total metric, while exiting block of code.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
'''
self.stop()
def _decorate(self, name, function, class_=None):
class_ = class_ or Timer
@wraps(function)
def _decorator(*args, **kwargs):
timer = self.get_client(name, class_)
timer.start()
try:
return function(*args, **kwargs)
finally:
# Stop the timer, send the message and cleanup
timer.stop('')
return _decorator
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
@contextlib.contextmanager
def time(self, subname=None, class_=None):
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
WoLpH/python-statsd
|
statsd/timer.py
|
Timer.stop
|
python
|
def stop(self, subname='total'):
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
|
Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L79-L89
|
[
"def send(self, subname, delta):\n '''Send the data to statsd via self.connection\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n :keyword delta: The time delta (time.time() - time.time()) to report\n :type delta: float\n '''\n ms = delta * 1000\n if ms > self.min_send_threshold:\n name = self._get_name(self.name, subname)\n self.logger.info('%s: %0.08fms', name, ms)\n return statsd.Client._send(self, {name: '%0.08f|ms' % ms})\n else:\n return True\n"
] |
class Timer(statsd.Client):
'''
Statsd Timer Object
Additional documentation is available at the parent class
:class:`~statsd.client.Client`
:keyword name: The name for this timer
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
:keyword min_send_threshold: Timings smaller than this will not be sent so
-1 can be used for all.
:type min_send_threshold: int
>>> timer = Timer('application_name').start()
>>> # do something
>>> timer.stop('executed_action')
True
'''
def __init__(self, name, connection=None, min_send_threshold=-1):
super(Timer, self).__init__(name, connection=connection)
self._start = None
self._last = None
self._stop = None
self.min_send_threshold = min_send_threshold
def start(self):
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
def send(self, subname, delta):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
def intermediate(self, subname):
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
def __enter__(self):
'''
Make a context manager out of self to measure time execution in a block
of code.
:return: statsd.timer.Timer
'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Stop measuring time sending total metric, while exiting block of code.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
'''
self.stop()
def _decorate(self, name, function, class_=None):
class_ = class_ or Timer
@wraps(function)
def _decorator(*args, **kwargs):
timer = self.get_client(name, class_)
timer.start()
try:
return function(*args, **kwargs)
finally:
# Stop the timer, send the message and cleanup
timer.stop('')
return _decorator
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
@contextlib.contextmanager
def time(self, subname=None, class_=None):
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
WoLpH/python-statsd
|
statsd/timer.py
|
Timer.decorate
|
python
|
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
|
Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L127-L152
|
[
"def _decorate(self, name, function, class_=None):\n class_ = class_ or Timer\n\n @wraps(function)\n def _decorator(*args, **kwargs):\n timer = self.get_client(name, class_)\n timer.start()\n try:\n return function(*args, **kwargs)\n finally:\n # Stop the timer, send the message and cleanup\n timer.stop('')\n\n return _decorator\n"
] |
class Timer(statsd.Client):
'''
Statsd Timer Object
Additional documentation is available at the parent class
:class:`~statsd.client.Client`
:keyword name: The name for this timer
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
:keyword min_send_threshold: Timings smaller than this will not be sent so
-1 can be used for all.
:type min_send_threshold: int
>>> timer = Timer('application_name').start()
>>> # do something
>>> timer.stop('executed_action')
True
'''
def __init__(self, name, connection=None, min_send_threshold=-1):
super(Timer, self).__init__(name, connection=connection)
self._start = None
self._last = None
self._stop = None
self.min_send_threshold = min_send_threshold
def start(self):
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
def send(self, subname, delta):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
def intermediate(self, subname):
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
def stop(self, subname='total'):
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
def __enter__(self):
'''
Make a context manager out of self to measure time execution in a block
of code.
:return: statsd.timer.Timer
'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Stop measuring time sending total metric, while exiting block of code.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
'''
self.stop()
def _decorate(self, name, function, class_=None):
class_ = class_ or Timer
@wraps(function)
def _decorator(*args, **kwargs):
timer = self.get_client(name, class_)
timer.start()
try:
return function(*args, **kwargs)
finally:
# Stop the timer, send the message and cleanup
timer.stop('')
return _decorator
@contextlib.contextmanager
def time(self, subname=None, class_=None):
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
WoLpH/python-statsd
|
statsd/timer.py
|
Timer.time
|
python
|
def time(self, subname=None, class_=None):
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L155-L183
|
[
"def get_client(self, name=None, class_=None):\n '''Get a (sub-)client with a separate namespace\n This way you can create a global/app based client with subclients\n per class/function\n\n :keyword name: The name to use, if the name for this client was `spam`\n and the `name` argument is `eggs` than the resulting name will be\n `spam.eggs`\n :type name: str\n :keyword class_: The :class:`~statsd.client.Client` subclass to use\n (e.g. :class:`~statsd.timer.Timer` or\n :class:`~statsd.counter.Counter`)\n :type class_: :class:`~statsd.client.Client`\n '''\n\n # If the name was given, use it. Otherwise simply clone\n name = self._get_name(self.name, name)\n\n # Create using the given class, or the current class\n if not class_:\n class_ = self.__class__\n\n return class_(\n name=name,\n connection=self.connection,\n )\n",
"def start(self):\n '''Start the timer and store the start time, this can only be executed\n once per instance\n\n It returns the timer instance so it can be chained when instantiating\n the timer instance like this:\n ``timer = Timer('application_name').start()``'''\n assert self._start is None, (\n 'Unable to start, the timer is already running')\n self._last = self._start = time.time()\n return self\n",
"def stop(self, subname='total'):\n '''Stop the timer and send the total since `start()` was run\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n '''\n assert self._stop is None, (\n 'Unable to stop, the timer is already stopped')\n self._stop = time.time()\n return self.send(subname, self._stop - self._start)\n"
] |
class Timer(statsd.Client):
'''
Statsd Timer Object
Additional documentation is available at the parent class
:class:`~statsd.client.Client`
:keyword name: The name for this timer
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
:keyword min_send_threshold: Timings smaller than this will not be sent so
-1 can be used for all.
:type min_send_threshold: int
>>> timer = Timer('application_name').start()
>>> # do something
>>> timer.stop('executed_action')
True
'''
def __init__(self, name, connection=None, min_send_threshold=-1):
super(Timer, self).__init__(name, connection=connection)
self._start = None
self._last = None
self._stop = None
self.min_send_threshold = min_send_threshold
def start(self):
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
def send(self, subname, delta):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
def intermediate(self, subname):
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
def stop(self, subname='total'):
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
def __enter__(self):
'''
Make a context manager out of self to measure time execution in a block
of code.
:return: statsd.timer.Timer
'''
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
'''
Stop measuring time sending total metric, while exiting block of code.
:param exc_type:
:param exc_val:
:param exc_tb:
:return:
'''
self.stop()
def _decorate(self, name, function, class_=None):
class_ = class_ or Timer
@wraps(function)
def _decorator(*args, **kwargs):
timer = self.get_client(name, class_)
timer.start()
try:
return function(*args, **kwargs)
finally:
# Stop the timer, send the message and cleanup
timer.stop('')
return _decorator
def decorate(self, function_or_name):
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
@contextlib.contextmanager
|
WoLpH/python-statsd
|
statsd/raw.py
|
Raw.send
|
python
|
def send(self, subname, value, timestamp=None):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The raw value to send
'''
if timestamp is None:
ts = int(dt.datetime.now().strftime("%s"))
else:
ts = timestamp
name = self._get_name(self.name, subname)
self.logger.info('%s: %s %s' % (name, value, ts))
return statsd.Client._send(self, {name: '%s|r|%s' % (value, ts)})
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The raw value to send
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/raw.py#L24-L38
|
[
"def _get_name(cls, *name_parts):\n name_parts = [compat.to_str(x) for x in name_parts if x]\n return '.'.join(name_parts)\n",
"def _send(self, data):\n return self.connection.send(data)\n"
] |
class Raw(statsd.Client):
'''Class to implement a statsd raw message.
If a service has already summarized its own
data for e.g. inspection purposes, use this
summarized data to send to a statsd that has
the raw patch, and this data will be sent
to graphite pretty much unchanged.
See https://github.com/chuyskywalker/statsd/blob/master/README.md for
more info.
>>> raw = Raw('test')
>>> raw.send('name', 12435)
True
>>> import time
>>> raw.send('name', 12435, time.time())
True
'''
|
WoLpH/python-statsd
|
statsd/gauge.py
|
Gauge._send
|
python
|
def _send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
name = self._get_name(self.name, subname)
self.logger.info('%s: %s', name, value)
return statsd.Client._send(self, {name: '%s|g' % value})
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/gauge.py#L10-L20
|
[
"def _get_name(cls, *name_parts):\n name_parts = [compat.to_str(x) for x in name_parts if x]\n return '.'.join(name_parts)\n",
"def _send(self, data):\n return self.connection.send(data)\n"
] |
class Gauge(statsd.Client):
'Class to implement a statsd gauge'
def send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
assert isinstance(value, compat.NUM_TYPES)
return self._send(subname, value)
def increment(self, subname=None, delta=1):
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def decrement(self, subname=None, delta=1):
'''Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
'''
delta = -int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def __add__(self, delta):
'''Increment the gauge with `delta`
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge += 5
'''
self.increment(delta=delta)
return self
def __sub__(self, delta):
'''Decrement the gauge with `delta`
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge -= 5
'''
self.decrement(delta=delta)
return self
def set(self, subname, value):
'''
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
'''
assert isinstance(value, compat.NUM_TYPES)
if value < 0:
self._send(subname, 0)
return self._send(subname, value)
|
WoLpH/python-statsd
|
statsd/gauge.py
|
Gauge.send
|
python
|
def send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
assert isinstance(value, compat.NUM_TYPES)
return self._send(subname, value)
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/gauge.py#L22-L31
|
[
"def _send(self, subname, value):\n '''Send the data to statsd via self.connection\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n :keyword value: The gauge value to send\n '''\n name = self._get_name(self.name, subname)\n self.logger.info('%s: %s', name, value)\n return statsd.Client._send(self, {name: '%s|g' % value})\n"
] |
class Gauge(statsd.Client):
'Class to implement a statsd gauge'
def _send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
name = self._get_name(self.name, subname)
self.logger.info('%s: %s', name, value)
return statsd.Client._send(self, {name: '%s|g' % value})
def increment(self, subname=None, delta=1):
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def decrement(self, subname=None, delta=1):
'''Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
'''
delta = -int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def __add__(self, delta):
'''Increment the gauge with `delta`
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge += 5
'''
self.increment(delta=delta)
return self
def __sub__(self, delta):
'''Decrement the gauge with `delta`
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge -= 5
'''
self.decrement(delta=delta)
return self
def set(self, subname, value):
'''
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
'''
assert isinstance(value, compat.NUM_TYPES)
if value < 0:
self._send(subname, 0)
return self._send(subname, value)
|
WoLpH/python-statsd
|
statsd/gauge.py
|
Gauge.increment
|
python
|
def increment(self, subname=None, delta=1):
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
|
Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/gauge.py#L33-L52
|
[
"def _send(self, subname, value):\n '''Send the data to statsd via self.connection\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n :keyword value: The gauge value to send\n '''\n name = self._get_name(self.name, subname)\n self.logger.info('%s: %s', name, value)\n return statsd.Client._send(self, {name: '%s|g' % value})\n"
] |
class Gauge(statsd.Client):
'Class to implement a statsd gauge'
def _send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
name = self._get_name(self.name, subname)
self.logger.info('%s: %s', name, value)
return statsd.Client._send(self, {name: '%s|g' % value})
def send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
assert isinstance(value, compat.NUM_TYPES)
return self._send(subname, value)
def decrement(self, subname=None, delta=1):
'''Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
'''
delta = -int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def __add__(self, delta):
'''Increment the gauge with `delta`
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge += 5
'''
self.increment(delta=delta)
return self
def __sub__(self, delta):
'''Decrement the gauge with `delta`
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge -= 5
'''
self.decrement(delta=delta)
return self
def set(self, subname, value):
'''
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
'''
assert isinstance(value, compat.NUM_TYPES)
if value < 0:
self._send(subname, 0)
return self._send(subname, value)
|
WoLpH/python-statsd
|
statsd/gauge.py
|
Gauge.decrement
|
python
|
def decrement(self, subname=None, delta=1):
'''Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
'''
delta = -int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
|
Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/gauge.py#L54-L73
|
[
"def _send(self, subname, value):\n '''Send the data to statsd via self.connection\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n :keyword value: The gauge value to send\n '''\n name = self._get_name(self.name, subname)\n self.logger.info('%s: %s', name, value)\n return statsd.Client._send(self, {name: '%s|g' % value})\n"
] |
class Gauge(statsd.Client):
'Class to implement a statsd gauge'
def _send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
name = self._get_name(self.name, subname)
self.logger.info('%s: %s', name, value)
return statsd.Client._send(self, {name: '%s|g' % value})
def send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
assert isinstance(value, compat.NUM_TYPES)
return self._send(subname, value)
def increment(self, subname=None, delta=1):
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def __add__(self, delta):
'''Increment the gauge with `delta`
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge += 5
'''
self.increment(delta=delta)
return self
def __sub__(self, delta):
'''Decrement the gauge with `delta`
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge -= 5
'''
self.decrement(delta=delta)
return self
def set(self, subname, value):
'''
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
'''
assert isinstance(value, compat.NUM_TYPES)
if value < 0:
self._send(subname, 0)
return self._send(subname, value)
|
WoLpH/python-statsd
|
statsd/gauge.py
|
Gauge.set
|
python
|
def set(self, subname, value):
'''
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
'''
assert isinstance(value, compat.NUM_TYPES)
if value < 0:
self._send(subname, 0)
return self._send(subname, value)
|
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/gauge.py#L99-L126
|
[
"def _send(self, subname, value):\n '''Send the data to statsd via self.connection\n\n :keyword subname: The subname to report the data to (appended to the\n client name)\n :type subname: str\n :keyword value: The gauge value to send\n '''\n name = self._get_name(self.name, subname)\n self.logger.info('%s: %s', name, value)\n return statsd.Client._send(self, {name: '%s|g' % value})\n"
] |
class Gauge(statsd.Client):
'Class to implement a statsd gauge'
def _send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
name = self._get_name(self.name, subname)
self.logger.info('%s: %s', name, value)
return statsd.Client._send(self, {name: '%s|g' % value})
def send(self, subname, value):
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
assert isinstance(value, compat.NUM_TYPES)
return self._send(subname, value)
def increment(self, subname=None, delta=1):
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def decrement(self, subname=None, delta=1):
'''Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
'''
delta = -int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
def __add__(self, delta):
'''Increment the gauge with `delta`
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge += 5
'''
self.increment(delta=delta)
return self
def __sub__(self, delta):
'''Decrement the gauge with `delta`
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge -= 5
'''
self.decrement(delta=delta)
return self
|
WoLpH/python-statsd
|
statsd/client.py
|
Client.get_client
|
python
|
def get_client(self, name=None, class_=None):
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
|
Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L45-L70
|
[
"def _get_name(cls, *name_parts):\n name_parts = [compat.to_str(x) for x in name_parts if x]\n return '.'.join(name_parts)\n"
] |
class Client(object):
'''Statsd Client Object
:keyword name: The name for this client
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
>>> client = Client('test')
>>> client
<Client:test@<Connection[localhost:8125] P(1.0)>>
>>> client.get_client('spam')
<Client:test.spam@<Connection[localhost:8125] P(1.0)>>
'''
#: The name of the client, everything sent from this client will be \
#: prefixed by name
name = None
#: The :class:`~statsd.connection.Connection` to use, creates a new
#: connection if no connection is given
connection = None
def __init__(self, name, connection=None):
self.name = self._get_name(name)
if not connection:
connection = statsd.Connection()
self.connection = connection
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
@classmethod
def _get_name(cls, *name_parts):
name_parts = [compat.to_str(x) for x in name_parts if x]
return '.'.join(name_parts)
def get_average(self, name=None):
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
def get_raw(self, name=None):
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
def __repr__(self):
return '<%s:%s@%r>' % (
self.__class__.__name__,
self.name,
self.connection,
)
def _send(self, data):
return self.connection.send(data)
|
WoLpH/python-statsd
|
statsd/client.py
|
Client.get_average
|
python
|
def get_average(self, name=None):
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
|
Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L72-L78
|
[
"def get_client(self, name=None, class_=None):\n '''Get a (sub-)client with a separate namespace\n This way you can create a global/app based client with subclients\n per class/function\n\n :keyword name: The name to use, if the name for this client was `spam`\n and the `name` argument is `eggs` than the resulting name will be\n `spam.eggs`\n :type name: str\n :keyword class_: The :class:`~statsd.client.Client` subclass to use\n (e.g. :class:`~statsd.timer.Timer` or\n :class:`~statsd.counter.Counter`)\n :type class_: :class:`~statsd.client.Client`\n '''\n\n # If the name was given, use it. Otherwise simply clone\n name = self._get_name(self.name, name)\n\n # Create using the given class, or the current class\n if not class_:\n class_ = self.__class__\n\n return class_(\n name=name,\n connection=self.connection,\n )\n"
] |
class Client(object):
'''Statsd Client Object
:keyword name: The name for this client
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
>>> client = Client('test')
>>> client
<Client:test@<Connection[localhost:8125] P(1.0)>>
>>> client.get_client('spam')
<Client:test.spam@<Connection[localhost:8125] P(1.0)>>
'''
#: The name of the client, everything sent from this client will be \
#: prefixed by name
name = None
#: The :class:`~statsd.connection.Connection` to use, creates a new
#: connection if no connection is given
connection = None
def __init__(self, name, connection=None):
self.name = self._get_name(name)
if not connection:
connection = statsd.Connection()
self.connection = connection
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
@classmethod
def _get_name(cls, *name_parts):
name_parts = [compat.to_str(x) for x in name_parts if x]
return '.'.join(name_parts)
def get_client(self, name=None, class_=None):
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
def get_raw(self, name=None):
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
def __repr__(self):
return '<%s:%s@%r>' % (
self.__class__.__name__,
self.name,
self.connection,
)
def _send(self, data):
return self.connection.send(data)
|
WoLpH/python-statsd
|
statsd/client.py
|
Client.get_counter
|
python
|
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
|
Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L80-L86
|
[
"def get_client(self, name=None, class_=None):\n '''Get a (sub-)client with a separate namespace\n This way you can create a global/app based client with subclients\n per class/function\n\n :keyword name: The name to use, if the name for this client was `spam`\n and the `name` argument is `eggs` than the resulting name will be\n `spam.eggs`\n :type name: str\n :keyword class_: The :class:`~statsd.client.Client` subclass to use\n (e.g. :class:`~statsd.timer.Timer` or\n :class:`~statsd.counter.Counter`)\n :type class_: :class:`~statsd.client.Client`\n '''\n\n # If the name was given, use it. Otherwise simply clone\n name = self._get_name(self.name, name)\n\n # Create using the given class, or the current class\n if not class_:\n class_ = self.__class__\n\n return class_(\n name=name,\n connection=self.connection,\n )\n"
] |
class Client(object):
'''Statsd Client Object
:keyword name: The name for this client
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
>>> client = Client('test')
>>> client
<Client:test@<Connection[localhost:8125] P(1.0)>>
>>> client.get_client('spam')
<Client:test.spam@<Connection[localhost:8125] P(1.0)>>
'''
#: The name of the client, everything sent from this client will be \
#: prefixed by name
name = None
#: The :class:`~statsd.connection.Connection` to use, creates a new
#: connection if no connection is given
connection = None
def __init__(self, name, connection=None):
self.name = self._get_name(name)
if not connection:
connection = statsd.Connection()
self.connection = connection
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
@classmethod
def _get_name(cls, *name_parts):
name_parts = [compat.to_str(x) for x in name_parts if x]
return '.'.join(name_parts)
def get_client(self, name=None, class_=None):
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
def get_average(self, name=None):
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
def get_raw(self, name=None):
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
def __repr__(self):
return '<%s:%s@%r>' % (
self.__class__.__name__,
self.name,
self.connection,
)
def _send(self, data):
return self.connection.send(data)
|
WoLpH/python-statsd
|
statsd/client.py
|
Client.get_gauge
|
python
|
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
|
Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L88-L94
|
[
"def get_client(self, name=None, class_=None):\n '''Get a (sub-)client with a separate namespace\n This way you can create a global/app based client with subclients\n per class/function\n\n :keyword name: The name to use, if the name for this client was `spam`\n and the `name` argument is `eggs` than the resulting name will be\n `spam.eggs`\n :type name: str\n :keyword class_: The :class:`~statsd.client.Client` subclass to use\n (e.g. :class:`~statsd.timer.Timer` or\n :class:`~statsd.counter.Counter`)\n :type class_: :class:`~statsd.client.Client`\n '''\n\n # If the name was given, use it. Otherwise simply clone\n name = self._get_name(self.name, name)\n\n # Create using the given class, or the current class\n if not class_:\n class_ = self.__class__\n\n return class_(\n name=name,\n connection=self.connection,\n )\n"
] |
class Client(object):
'''Statsd Client Object
:keyword name: The name for this client
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
>>> client = Client('test')
>>> client
<Client:test@<Connection[localhost:8125] P(1.0)>>
>>> client.get_client('spam')
<Client:test.spam@<Connection[localhost:8125] P(1.0)>>
'''
#: The name of the client, everything sent from this client will be \
#: prefixed by name
name = None
#: The :class:`~statsd.connection.Connection` to use, creates a new
#: connection if no connection is given
connection = None
def __init__(self, name, connection=None):
self.name = self._get_name(name)
if not connection:
connection = statsd.Connection()
self.connection = connection
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
@classmethod
def _get_name(cls, *name_parts):
name_parts = [compat.to_str(x) for x in name_parts if x]
return '.'.join(name_parts)
def get_client(self, name=None, class_=None):
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
def get_average(self, name=None):
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
def get_raw(self, name=None):
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
def __repr__(self):
return '<%s:%s@%r>' % (
self.__class__.__name__,
self.name,
self.connection,
)
def _send(self, data):
return self.connection.send(data)
|
WoLpH/python-statsd
|
statsd/client.py
|
Client.get_raw
|
python
|
def get_raw(self, name=None):
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
|
Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L96-L102
|
[
"def get_client(self, name=None, class_=None):\n '''Get a (sub-)client with a separate namespace\n This way you can create a global/app based client with subclients\n per class/function\n\n :keyword name: The name to use, if the name for this client was `spam`\n and the `name` argument is `eggs` than the resulting name will be\n `spam.eggs`\n :type name: str\n :keyword class_: The :class:`~statsd.client.Client` subclass to use\n (e.g. :class:`~statsd.timer.Timer` or\n :class:`~statsd.counter.Counter`)\n :type class_: :class:`~statsd.client.Client`\n '''\n\n # If the name was given, use it. Otherwise simply clone\n name = self._get_name(self.name, name)\n\n # Create using the given class, or the current class\n if not class_:\n class_ = self.__class__\n\n return class_(\n name=name,\n connection=self.connection,\n )\n"
] |
class Client(object):
'''Statsd Client Object
:keyword name: The name for this client
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
>>> client = Client('test')
>>> client
<Client:test@<Connection[localhost:8125] P(1.0)>>
>>> client.get_client('spam')
<Client:test.spam@<Connection[localhost:8125] P(1.0)>>
'''
#: The name of the client, everything sent from this client will be \
#: prefixed by name
name = None
#: The :class:`~statsd.connection.Connection` to use, creates a new
#: connection if no connection is given
connection = None
def __init__(self, name, connection=None):
self.name = self._get_name(name)
if not connection:
connection = statsd.Connection()
self.connection = connection
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
@classmethod
def _get_name(cls, *name_parts):
name_parts = [compat.to_str(x) for x in name_parts if x]
return '.'.join(name_parts)
def get_client(self, name=None, class_=None):
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
def get_average(self, name=None):
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
def __repr__(self):
return '<%s:%s@%r>' % (
self.__class__.__name__,
self.name,
self.connection,
)
def _send(self, data):
return self.connection.send(data)
|
WoLpH/python-statsd
|
statsd/client.py
|
Client.get_timer
|
python
|
def get_timer(self, name=None):
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
|
Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
|
train
|
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L104-L110
|
[
"def get_client(self, name=None, class_=None):\n '''Get a (sub-)client with a separate namespace\n This way you can create a global/app based client with subclients\n per class/function\n\n :keyword name: The name to use, if the name for this client was `spam`\n and the `name` argument is `eggs` than the resulting name will be\n `spam.eggs`\n :type name: str\n :keyword class_: The :class:`~statsd.client.Client` subclass to use\n (e.g. :class:`~statsd.timer.Timer` or\n :class:`~statsd.counter.Counter`)\n :type class_: :class:`~statsd.client.Client`\n '''\n\n # If the name was given, use it. Otherwise simply clone\n name = self._get_name(self.name, name)\n\n # Create using the given class, or the current class\n if not class_:\n class_ = self.__class__\n\n return class_(\n name=name,\n connection=self.connection,\n )\n"
] |
class Client(object):
'''Statsd Client Object
:keyword name: The name for this client
:type name: str
:keyword connection: The connection to use, will be automatically created
if not given
:type connection: :class:`~statsd.connection.Connection`
>>> client = Client('test')
>>> client
<Client:test@<Connection[localhost:8125] P(1.0)>>
>>> client.get_client('spam')
<Client:test.spam@<Connection[localhost:8125] P(1.0)>>
'''
#: The name of the client, everything sent from this client will be \
#: prefixed by name
name = None
#: The :class:`~statsd.connection.Connection` to use, creates a new
#: connection if no connection is given
connection = None
def __init__(self, name, connection=None):
self.name = self._get_name(name)
if not connection:
connection = statsd.Connection()
self.connection = connection
self.logger = logging.getLogger(
'%s.%s' % (__name__, self.__class__.__name__))
@classmethod
def _get_name(cls, *name_parts):
name_parts = [compat.to_str(x) for x in name_parts if x]
return '.'.join(name_parts)
def get_client(self, name=None, class_=None):
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` than the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
def get_average(self, name=None):
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
def get_counter(self, name=None):
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
def get_gauge(self, name=None):
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
def get_raw(self, name=None):
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
def __repr__(self):
return '<%s:%s@%r>' % (
self.__class__.__name__,
self.name,
self.connection,
)
def _send(self, data):
return self.connection.send(data)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/handlers.py
|
BasicIDTokenHandler.now
|
python
|
def now(self):
if self._now is None:
# Compute the current time only once per instance
self._now = datetime.utcnow()
return self._now
|
Capture time.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/handlers.py#L83-L88
| null |
class BasicIDTokenHandler(object):
"""
Basic OpenID Connect ID token claims.
For reference see:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
"""
def __init__(self):
self._now = None
@property
def scope_openid(self, data):
""" Returns claims for the `openid` profile. """
return ['iss', 'sub', 'aud', 'iat', 'exp', 'nonce']
def claim_iss(self, data):
""" Required issuer identifier. """
return settings.OAUTH_OIDC_ISSUER
def claim_sub(self, data):
""" Required subject identifier. """
# Use the primary key as the identifier
return str(data['user'].pk)
def claim_aud(self, data):
""" Required audience. """
return data['client'] .client_id
def claim_iat(self, data):
""" Required current/issued time. """
return timegm(self.now.utctimetuple())
def claim_exp(self, data):
""" Required expiration time. """
expiration = getattr(settings, 'OAUTH_ID_TOKEN_EXPIRATION', 30)
expires = self.now + timedelta(seconds=expiration)
return timegm(expires.utctimetuple())
def claim_nonce(self, data):
""" Optional replay attack protection. """
return data.get('value')
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/handlers.py
|
BasicIDTokenHandler.claim_exp
|
python
|
def claim_exp(self, data):
expiration = getattr(settings, 'OAUTH_ID_TOKEN_EXPIRATION', 30)
expires = self.now + timedelta(seconds=expiration)
return timegm(expires.utctimetuple())
|
Required expiration time.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/handlers.py#L111-L115
| null |
class BasicIDTokenHandler(object):
"""
Basic OpenID Connect ID token claims.
For reference see:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
"""
def __init__(self):
self._now = None
@property
def now(self):
""" Capture time. """
if self._now is None:
# Compute the current time only once per instance
self._now = datetime.utcnow()
return self._now
def scope_openid(self, data):
""" Returns claims for the `openid` profile. """
return ['iss', 'sub', 'aud', 'iat', 'exp', 'nonce']
def claim_iss(self, data):
""" Required issuer identifier. """
return settings.OAUTH_OIDC_ISSUER
def claim_sub(self, data):
""" Required subject identifier. """
# Use the primary key as the identifier
return str(data['user'].pk)
def claim_aud(self, data):
""" Required audience. """
return data['client'] .client_id
def claim_iat(self, data):
""" Required current/issued time. """
return timegm(self.now.utctimetuple())
def claim_nonce(self, data):
""" Optional replay attack protection. """
return data.get('value')
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/management/commands/create_oauth2_client.py
|
Command._clean_required_args
|
python
|
def _clean_required_args(self, url, redirect_uri, client_type):
# Validate URLs
for url_to_validate in (url, redirect_uri):
try:
URLValidator()(url_to_validate)
except ValidationError:
raise CommandError("URLs provided are invalid. Please provide valid application and redirect URLs.")
# Validate and map client type to the appropriate django-oauth2-provider constant
client_type = client_type.lower()
client_type = {
'confidential': CONFIDENTIAL,
'public': PUBLIC
}.get(client_type)
if client_type is None:
raise CommandError("Client type provided is invalid. Please use one of 'confidential' or 'public'.")
self.fields = { # pylint: disable=attribute-defined-outside-init
'url': url,
'redirect_uri': redirect_uri,
'client_type': client_type,
}
|
Validate and clean the command's arguments.
Arguments:
url (str): Client's application URL.
redirect_uri (str): Client application's OAuth2 callback URI.
client_type (str): Client's type, indicating whether the Client application
is capable of maintaining the confidentiality of its credentials (e.g., running on a
secure server) or is incapable of doing so (e.g., running in a browser).
Raises:
CommandError, if the URLs provided are invalid, or if the client type provided is invalid.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/management/commands/create_oauth2_client.py#L106-L141
| null |
class Command(BaseCommand):
"""
create_oauth2_client command class
"""
help = 'Create a new OAuth2 Client. Outputs a serialized representation of the newly-created Client.'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
# Required positional arguments.
parser.add_argument(
'url',
help="Url."
)
parser.add_argument(
'redirect_uri',
help="Redirect URI."
)
parser.add_argument(
'client_type',
help="Client type."
)
# Optional options.
parser.add_argument(
'-u',
'--username',
help="Username of a user to associate with the Client."
)
parser.add_argument(
'-n',
'--client_name',
help="String to assign as the Client name."
)
parser.add_argument(
'-i',
'--client_id',
help="String to assign as the Client ID."
)
parser.add_argument(
'-s',
'--client_secret',
help="String to assign as the Client secret. Should not be shared."
)
parser.add_argument(
'-t',
'--trusted',
action='store_true',
help="Designate the Client as trusted. Trusted Clients bypass the user consent "
"form typically displayed after validating the user's credentials."
)
parser.add_argument(
'--logout_uri',
help="Client logout URI. This value will be used for single sign out."
)
def handle(self, *args, **options):
self._clean_required_args(options['url'], options['redirect_uri'], options['client_type'])
self._parse_options(options)
client_id = self.fields.get('client_id')
trusted = self.fields.pop('trusted')
# Check if client ID is already in use. If so, fetch existing Client and update fields.
client_id_claimed = Client.objects.filter(client_id=client_id).exists()
if client_id_claimed:
client = Client.objects.get(client_id=client_id)
for key, value in self.fields.items():
setattr(client, key, value)
client.save()
else:
client = Client.objects.create(**self.fields)
if trusted:
TrustedClient.objects.get_or_create(client=client)
else:
try:
TrustedClient.objects.get(client=client).delete()
except TrustedClient.DoesNotExist:
pass
serialized = json.dumps(client.serialize(), indent=4)
self.stdout.write(serialized)
def _parse_options(self, options):
"""Parse the command's options.
Arguments:
options (dict): Options with which the command was called.
Raises:
CommandError, if a user matching the provided username does not exist.
"""
for key in ('username', 'client_name', 'client_id', 'client_secret', 'trusted', 'logout_uri'):
value = options.get(key)
if value is not None:
self.fields[key] = value
username = self.fields.pop('username', None)
if username is not None:
try:
user_model = get_user_model()
self.fields['user'] = user_model.objects.get(username=username)
except user_model.DoesNotExist:
raise CommandError("User matching the provided username does not exist.")
# The keyword argument 'name' conflicts with that of `call_command()`. We instead
# use 'client_name' up to this point, then swap it out for the expected field, 'name'.
client_name = self.fields.pop('client_name', None)
if client_name is not None:
self.fields['name'] = client_name
logout_uri = self.fields.get('logout_uri')
if logout_uri:
try:
URLValidator()(logout_uri)
except ValidationError:
raise CommandError("The logout_uri is invalid.")
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/management/commands/create_oauth2_client.py
|
Command._parse_options
|
python
|
def _parse_options(self, options):
for key in ('username', 'client_name', 'client_id', 'client_secret', 'trusted', 'logout_uri'):
value = options.get(key)
if value is not None:
self.fields[key] = value
username = self.fields.pop('username', None)
if username is not None:
try:
user_model = get_user_model()
self.fields['user'] = user_model.objects.get(username=username)
except user_model.DoesNotExist:
raise CommandError("User matching the provided username does not exist.")
# The keyword argument 'name' conflicts with that of `call_command()`. We instead
# use 'client_name' up to this point, then swap it out for the expected field, 'name'.
client_name = self.fields.pop('client_name', None)
if client_name is not None:
self.fields['name'] = client_name
logout_uri = self.fields.get('logout_uri')
if logout_uri:
try:
URLValidator()(logout_uri)
except ValidationError:
raise CommandError("The logout_uri is invalid.")
|
Parse the command's options.
Arguments:
options (dict): Options with which the command was called.
Raises:
CommandError, if a user matching the provided username does not exist.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/management/commands/create_oauth2_client.py#L143-L177
| null |
class Command(BaseCommand):
"""
create_oauth2_client command class
"""
help = 'Create a new OAuth2 Client. Outputs a serialized representation of the newly-created Client.'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
# Required positional arguments.
parser.add_argument(
'url',
help="Url."
)
parser.add_argument(
'redirect_uri',
help="Redirect URI."
)
parser.add_argument(
'client_type',
help="Client type."
)
# Optional options.
parser.add_argument(
'-u',
'--username',
help="Username of a user to associate with the Client."
)
parser.add_argument(
'-n',
'--client_name',
help="String to assign as the Client name."
)
parser.add_argument(
'-i',
'--client_id',
help="String to assign as the Client ID."
)
parser.add_argument(
'-s',
'--client_secret',
help="String to assign as the Client secret. Should not be shared."
)
parser.add_argument(
'-t',
'--trusted',
action='store_true',
help="Designate the Client as trusted. Trusted Clients bypass the user consent "
"form typically displayed after validating the user's credentials."
)
parser.add_argument(
'--logout_uri',
help="Client logout URI. This value will be used for single sign out."
)
def handle(self, *args, **options):
self._clean_required_args(options['url'], options['redirect_uri'], options['client_type'])
self._parse_options(options)
client_id = self.fields.get('client_id')
trusted = self.fields.pop('trusted')
# Check if client ID is already in use. If so, fetch existing Client and update fields.
client_id_claimed = Client.objects.filter(client_id=client_id).exists()
if client_id_claimed:
client = Client.objects.get(client_id=client_id)
for key, value in self.fields.items():
setattr(client, key, value)
client.save()
else:
client = Client.objects.create(**self.fields)
if trusted:
TrustedClient.objects.get_or_create(client=client)
else:
try:
TrustedClient.objects.get(client=client).delete()
except TrustedClient.DoesNotExist:
pass
serialized = json.dumps(client.serialize(), indent=4)
self.stdout.write(serialized)
def _clean_required_args(self, url, redirect_uri, client_type):
"""
Validate and clean the command's arguments.
Arguments:
url (str): Client's application URL.
redirect_uri (str): Client application's OAuth2 callback URI.
client_type (str): Client's type, indicating whether the Client application
is capable of maintaining the confidentiality of its credentials (e.g., running on a
secure server) or is incapable of doing so (e.g., running in a browser).
Raises:
CommandError, if the URLs provided are invalid, or if the client type provided is invalid.
"""
# Validate URLs
for url_to_validate in (url, redirect_uri):
try:
URLValidator()(url_to_validate)
except ValidationError:
raise CommandError("URLs provided are invalid. Please provide valid application and redirect URLs.")
# Validate and map client type to the appropriate django-oauth2-provider constant
client_type = client_type.lower()
client_type = {
'confidential': CONFIDENTIAL,
'public': PUBLIC
}.get(client_type)
if client_type is None:
raise CommandError("Client type provided is invalid. Please use one of 'confidential' or 'public'.")
self.fields = { # pylint: disable=attribute-defined-outside-init
'url': url,
'redirect_uri': redirect_uri,
'client_type': client_type,
}
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/core.py
|
id_token
|
python
|
def id_token(access_token, nonce=None, claims_request=None):
handlers = HANDLERS['id_token']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('id_token', {}) if claims_request else {}
scope_request = provider.scope.to_names(access_token.scope)
if nonce:
claims_request_section.update({'nonce': {'value': nonce}})
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims)
|
Returns data required for an OpenID Connect ID Token according to:
- http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
access_token (:class:`AccessToken`): Associated OAuth2 access token.
nonce (str): Optional nonce to protect against replay attacks.
claims_request (dict): Optional dictionary with the claims request parameters.
Information on the `claims_request` parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns an :class:`IDToken` instance with the scopes from the
access_token and the corresponding claims. Claims in the
`claims_request` paramater id_token section will be included *in
addition* to the ones corresponding to the scopes specified in the
`access_token`.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/core.py#L59-L99
|
[
"def collect(handlers, access_token, scope_request=None, claims_request=None):\n \"\"\"\n Collect all the claims values from the `handlers`.\n\n Arguments:\n handlers (list): List of claim :class:`Handler` classes.\n access_token (:class:AccessToken): Associated access token.\n scope_request (list): List of requested scopes.\n claims_request (dict): Dictionary with only the relevant section of a\n OpenID Connect claims request.\n\n Returns a list of the scopes from `scope_request` that are authorized, and a\n dictionary of the claims associated with the authorized scopes in\n `scope_request`, and additionally, the authorized claims listed in\n `claims_request`.\n\n \"\"\"\n user = access_token.user\n client = access_token.client\n\n # Instantiate handlers. Each handler is instanciated only once, allowing the\n # handler to keep state in-between calls to its scope and claim methods.\n\n handlers = [cls() for cls in handlers]\n\n # Find all authorized scopes by including the access_token scopes. Note\n # that the handlers determine if a scope is authorized, not its presense in\n # the access_token.\n\n required_scopes = set(REQUIRED_SCOPES)\n token_scopes = set(provider.scope.to_names(access_token.scope))\n authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)\n\n # Select only the authorized scopes from the requested scopes.\n\n scope_request = set(scope_request) if scope_request else set()\n scopes = required_scopes | (authorized_scopes & scope_request)\n\n # Find all authorized claims names for the authorized_scopes.\n\n authorized_names = _collect_names(handlers, authorized_scopes, user, client)\n\n # Select only the requested claims if no scope has been requested. 
Selecting\n # scopes has prevalence over selecting claims.\n\n claims_request = _validate_claim_request(claims_request)\n\n # Add the requested claims that are authorized to the response.\n\n requested_names = set(claims_request.keys()) & authorized_names\n names = _collect_names(handlers, scopes, user, client) | requested_names\n\n # Get the values for the claims.\n\n claims = _collect_values(\n handlers,\n names=names,\n user=user,\n client=client,\n values=claims_request or {}\n )\n\n return authorized_scopes, claims\n"
] |
"""
OpenID Connect core related utility functions.
Defines utility functions to process the ID Token and UserInfo
endpoints according to the OpenID Connect specification.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import jwt
import provider.scope
from django.utils.module_loading import import_string
from .. import constants
from .collect import collect
HANDLERS = {
'id_token': [import_string(cls) for cls in constants.ID_TOKEN_HANDLERS],
'userinfo': [import_string(cls) for cls in constants.USERINFO_HANDLERS]
}
class IDToken(object):
"""
Simple container for OpenID Connect related responses.
Attributes:
access_token (:class:`AccessToken`): Associated Access Token object.
scopes (list): List of scope names.
claims (dict): Dictionary of claim names and values.
"""
def __init__(self, access_token, scopes, claims):
self.access_token = access_token
self.scopes = scopes
self.claims = claims
def encode(self, secret, algorithm='HS256'):
"""
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
"""
return jwt.encode(self.claims, secret, algorithm)
def userinfo(access_token, scope_request=None, claims_request=None):
"""
Returns data required for an OpenID Connect UserInfo response, according to:
http://openid.net/specs/openid-connect-basic-1_0.html#UserInfoResponse
Supports scope and claims request parameter as described in:
- http://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Arguments: access_token (:class:`AccessToken`): Associated access
token. scope_request (list): Optional list of requested
scopes. Only scopes authorized in the `access_token` will be
considered. claims_request
(dict): Optional dictionary with a claims request parameter.
Information on the claims request parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
As a convinience, if neither `scope_request` or user_info claim is
specified in the `claims_request`, it will return the claims for
all the scopes in the `access_token`.
Returns an :class:`IDToken` instance with the scopes from the
`scope_request` and the corresponding claims. Claims in the
`claims_request` paramater userinfo section will be included *in
addition* to the ones corresponding to `scope_request`.
"""
handlers = HANDLERS['userinfo']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('userinfo', {}) if claims_request else {}
# If nothing is requested, return the claims for the scopes in the access token.
if not scope_request and not claims_request_section:
scope_request = provider.scope.to_names(access_token.scope)
else:
scope_request = scope_request
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/core.py
|
userinfo
|
python
|
def userinfo(access_token, scope_request=None, claims_request=None):
handlers = HANDLERS['userinfo']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('userinfo', {}) if claims_request else {}
# If nothing is requested, return the claims for the scopes in the access token.
if not scope_request and not claims_request_section:
scope_request = provider.scope.to_names(access_token.scope)
else:
scope_request = scope_request
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims)
|
Returns data required for an OpenID Connect UserInfo response, according to:
http://openid.net/specs/openid-connect-basic-1_0.html#UserInfoResponse
Supports scope and claims request parameter as described in:
- http://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Arguments: access_token (:class:`AccessToken`): Associated access
token. scope_request (list): Optional list of requested
scopes. Only scopes authorized in the `access_token` will be
considered. claims_request
(dict): Optional dictionary with a claims request parameter.
Information on the claims request parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
As a convinience, if neither `scope_request` or user_info claim is
specified in the `claims_request`, it will return the claims for
all the scopes in the `access_token`.
Returns an :class:`IDToken` instance with the scopes from the
`scope_request` and the corresponding claims. Claims in the
`claims_request` paramater userinfo section will be included *in
addition* to the ones corresponding to `scope_request`.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/core.py#L102-L152
|
[
"def collect(handlers, access_token, scope_request=None, claims_request=None):\n \"\"\"\n Collect all the claims values from the `handlers`.\n\n Arguments:\n handlers (list): List of claim :class:`Handler` classes.\n access_token (:class:AccessToken): Associated access token.\n scope_request (list): List of requested scopes.\n claims_request (dict): Dictionary with only the relevant section of a\n OpenID Connect claims request.\n\n Returns a list of the scopes from `scope_request` that are authorized, and a\n dictionary of the claims associated with the authorized scopes in\n `scope_request`, and additionally, the authorized claims listed in\n `claims_request`.\n\n \"\"\"\n user = access_token.user\n client = access_token.client\n\n # Instantiate handlers. Each handler is instanciated only once, allowing the\n # handler to keep state in-between calls to its scope and claim methods.\n\n handlers = [cls() for cls in handlers]\n\n # Find all authorized scopes by including the access_token scopes. Note\n # that the handlers determine if a scope is authorized, not its presense in\n # the access_token.\n\n required_scopes = set(REQUIRED_SCOPES)\n token_scopes = set(provider.scope.to_names(access_token.scope))\n authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)\n\n # Select only the authorized scopes from the requested scopes.\n\n scope_request = set(scope_request) if scope_request else set()\n scopes = required_scopes | (authorized_scopes & scope_request)\n\n # Find all authorized claims names for the authorized_scopes.\n\n authorized_names = _collect_names(handlers, authorized_scopes, user, client)\n\n # Select only the requested claims if no scope has been requested. 
Selecting\n # scopes has prevalence over selecting claims.\n\n claims_request = _validate_claim_request(claims_request)\n\n # Add the requested claims that are authorized to the response.\n\n requested_names = set(claims_request.keys()) & authorized_names\n names = _collect_names(handlers, scopes, user, client) | requested_names\n\n # Get the values for the claims.\n\n claims = _collect_values(\n handlers,\n names=names,\n user=user,\n client=client,\n values=claims_request or {}\n )\n\n return authorized_scopes, claims\n"
] |
"""
OpenID Connect core related utility functions.
Defines utility functions to process the ID Token and UserInfo
endpoints according to the OpenID Connect specification.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import jwt
import provider.scope
from django.utils.module_loading import import_string
from .. import constants
from .collect import collect
HANDLERS = {
'id_token': [import_string(cls) for cls in constants.ID_TOKEN_HANDLERS],
'userinfo': [import_string(cls) for cls in constants.USERINFO_HANDLERS]
}
class IDToken(object):
"""
Simple container for OpenID Connect related responses.
Attributes:
access_token (:class:`AccessToken`): Associated Access Token object.
scopes (list): List of scope names.
claims (dict): Dictionary of claim names and values.
"""
def __init__(self, access_token, scopes, claims):
self.access_token = access_token
self.scopes = scopes
self.claims = claims
def encode(self, secret, algorithm='HS256'):
"""
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
"""
return jwt.encode(self.claims, secret, algorithm)
def id_token(access_token, nonce=None, claims_request=None):
"""
Returns data required for an OpenID Connect ID Token according to:
- http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
access_token (:class:`AccessToken`): Associated OAuth2 access token.
nonce (str): Optional nonce to protect against replay attacks.
claims_request (dict): Optional dictionary with the claims request parameters.
Information on the `claims_request` parameter specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns an :class:`IDToken` instance with the scopes from the
access_token and the corresponding claims. Claims in the
`claims_request` paramater id_token section will be included *in
addition* to the ones corresponding to the scopes specified in the
`access_token`.
"""
handlers = HANDLERS['id_token']
# Select only the relevant section of the claims request.
claims_request_section = claims_request.get('id_token', {}) if claims_request else {}
scope_request = provider.scope.to_names(access_token.scope)
if nonce:
claims_request_section.update({'nonce': {'value': nonce}})
scopes, claims = collect(
handlers,
access_token,
scope_request=scope_request,
claims_request=claims_request_section,
)
return IDToken(access_token, scopes, claims)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/core.py
|
IDToken.encode
|
python
|
def encode(self, secret, algorithm='HS256'):
return jwt.encode(self.claims, secret, algorithm)
|
Encode the set of claims to the JWT (JSON Web Token) format
according to the OpenID Connect specification:
http://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Arguments:
claims (dict): A dictionary with the OpenID Connect claims.
secret (str): Secret used to encode the id_token.
algorithm (str): Algorithm used for encoding.
Defaults to HS256.
Returns encoded JWT token string.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/core.py#L39-L56
| null |
class IDToken(object):
"""
Simple container for OpenID Connect related responses.
Attributes:
access_token (:class:`AccessToken`): Associated Access Token object.
scopes (list): List of scope names.
claims (dict): Dictionary of claim names and values.
"""
def __init__(self, access_token, scopes, claims):
self.access_token = access_token
self.scopes = scopes
self.claims = claims
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/views.py
|
AccessTokenView.access_token_response_data
|
python
|
def access_token_response_data(self, access_token, response_type=None, nonce=''):
# Clear the scope for requests that do not use OpenID Connect.
# Scopes for pure OAuth2 request are currently not supported.
scope = constants.DEFAULT_SCOPE
extra_data = {}
# Add OpenID Connect `id_token` if requested.
#
# TODO: Unfourtunately because of how django-oauth2-provider implements
# scopes, we cannot check if `openid` is the first scope to be
# requested, as required by OpenID Connect specification.
if provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
id_token = self.get_id_token(access_token, nonce)
extra_data['id_token'] = self.encode_id_token(id_token)
scope = provider.scope.to_int(*id_token.scopes)
# Update the token scope, so it includes only authorized values.
access_token.scope = scope
access_token.save()
# Get the main fields for OAuth2 response.
response_data = super(AccessTokenView, self).access_token_response_data(access_token)
# Add any additional fields if OpenID Connect is requested. The order of
# the addition makes sures the OAuth2 values are not overrided.
response_data = dict(extra_data.items() + response_data.items())
return response_data
|
Return `access_token` fields for OAuth2, and add `id_token` fields for
OpenID Connect according to the `access_token` scope.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/views.py#L110-L145
|
[
"def get_id_token(self, access_token, nonce):\n \"\"\" Return an ID token for the given Access Token. \"\"\"\n\n claims_string = self.request.POST.get('claims')\n claims_request = json.loads(claims_string) if claims_string else {}\n\n return oidc.id_token(access_token, nonce, claims_request)\n",
"def encode_id_token(self, id_token):\n \"\"\"\n Return encoded ID token.\n\n \"\"\"\n\n # Encode the ID token using the `client_secret`.\n #\n # TODO: Using the `client_secret` is not ideal, since it is transmitted\n # over the wire in some authentication flows. A better alternative is\n # to use the public key of the issuer, which also allows the ID token to\n # be shared among clients. Doing so however adds some operational\n # costs. We should consider this for the future.\n\n secret = id_token.access_token.client.client_secret\n\n return id_token.encode(secret)\n"
] |
class AccessTokenView(provider.oauth2.views.AccessTokenView):
"""
Customized OAuth2 access token view.
Allows usage of email as main identifier when requesting a password grant.
Support the ID Token endpoint following the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint
By default it returns all the claims available to the scope requested,
and available to the claim handlers configured by `OAUTH_OIDC_ID_TOKEN_HANDLERS`
"""
# Add custom authentication provider, to support email as username.
authentication = (provider.oauth2.views.AccessTokenView.authentication +
(PublicPasswordBackend, ))
# The following grant overrides make sure the view uses our customized forms.
# pylint: disable=no-member
def get_authorization_code_grant(self, _request, data, client):
form = AuthorizationCodeGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data.get('grant')
# pylint: disable=no-member
def get_refresh_token_grant(self, _request, data, client):
form = RefreshTokenGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data.get('refresh_token')
# pylint: disable=no-member
def get_password_grant(self, _request, data, client):
# Use customized form to allow use of user email during authentication.
form = PasswordGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data
# pylint: disable=super-on-old-class
def get_id_token(self, access_token, nonce):
""" Return an ID token for the given Access Token. """
claims_string = self.request.POST.get('claims')
claims_request = json.loads(claims_string) if claims_string else {}
return oidc.id_token(access_token, nonce, claims_request)
def encode_id_token(self, id_token):
"""
Return encoded ID token.
"""
# Encode the ID token using the `client_secret`.
#
# TODO: Using the `client_secret` is not ideal, since it is transmitted
# over the wire in some authentication flows. A better alternative is
# to use the public key of the issuer, which also allows the ID token to
# be shared among clients. Doing so however adds some operational
# costs. We should consider this for the future.
secret = id_token.access_token.client.client_secret
return id_token.encode(secret)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/views.py
|
AccessTokenView.get_id_token
|
python
|
def get_id_token(self, access_token, nonce):
claims_string = self.request.POST.get('claims')
claims_request = json.loads(claims_string) if claims_string else {}
return oidc.id_token(access_token, nonce, claims_request)
|
Return an ID token for the given Access Token.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/views.py#L147-L153
|
[
"def id_token(access_token, nonce=None, claims_request=None):\n \"\"\"\n Returns data required for an OpenID Connect ID Token according to:\n\n - http://openid.net/specs/openid-connect-basic-1_0.html#IDToken\n\n Arguments:\n access_token (:class:`AccessToken`): Associated OAuth2 access token.\n nonce (str): Optional nonce to protect against replay attacks.\n claims_request (dict): Optional dictionary with the claims request parameters.\n\n Information on the `claims_request` parameter specification:\n\n - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter\n\n Returns an :class:`IDToken` instance with the scopes from the\n access_token and the corresponding claims. Claims in the\n `claims_request` paramater id_token section will be included *in\n addition* to the ones corresponding to the scopes specified in the\n `access_token`.\n\n \"\"\"\n\n handlers = HANDLERS['id_token']\n\n # Select only the relevant section of the claims request.\n claims_request_section = claims_request.get('id_token', {}) if claims_request else {}\n\n scope_request = provider.scope.to_names(access_token.scope)\n\n if nonce:\n claims_request_section.update({'nonce': {'value': nonce}})\n\n scopes, claims = collect(\n handlers,\n access_token,\n scope_request=scope_request,\n claims_request=claims_request_section,\n )\n\n return IDToken(access_token, scopes, claims)\n"
] |
class AccessTokenView(provider.oauth2.views.AccessTokenView):
"""
Customized OAuth2 access token view.
Allows usage of email as main identifier when requesting a password grant.
Support the ID Token endpoint following the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint
By default it returns all the claims available to the scope requested,
and available to the claim handlers configured by `OAUTH_OIDC_ID_TOKEN_HANDLERS`
"""
# Add custom authentication provider, to support email as username.
authentication = (provider.oauth2.views.AccessTokenView.authentication +
(PublicPasswordBackend, ))
# The following grant overrides make sure the view uses our customized forms.
# pylint: disable=no-member
def get_authorization_code_grant(self, _request, data, client):
form = AuthorizationCodeGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data.get('grant')
# pylint: disable=no-member
def get_refresh_token_grant(self, _request, data, client):
form = RefreshTokenGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data.get('refresh_token')
# pylint: disable=no-member
def get_password_grant(self, _request, data, client):
# Use customized form to allow use of user email during authentication.
form = PasswordGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data
# pylint: disable=super-on-old-class
def access_token_response_data(self, access_token, response_type=None, nonce=''):
"""
Return `access_token` fields for OAuth2, and add `id_token` fields for
OpenID Connect according to the `access_token` scope.
"""
# Clear the scope for requests that do not use OpenID Connect.
# Scopes for pure OAuth2 request are currently not supported.
scope = constants.DEFAULT_SCOPE
extra_data = {}
# Add OpenID Connect `id_token` if requested.
#
# TODO: Unfourtunately because of how django-oauth2-provider implements
# scopes, we cannot check if `openid` is the first scope to be
# requested, as required by OpenID Connect specification.
if provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
id_token = self.get_id_token(access_token, nonce)
extra_data['id_token'] = self.encode_id_token(id_token)
scope = provider.scope.to_int(*id_token.scopes)
# Update the token scope, so it includes only authorized values.
access_token.scope = scope
access_token.save()
# Get the main fields for OAuth2 response.
response_data = super(AccessTokenView, self).access_token_response_data(access_token)
# Add any additional fields if OpenID Connect is requested. The order of
# the addition makes sures the OAuth2 values are not overrided.
response_data = dict(extra_data.items() + response_data.items())
return response_data
def encode_id_token(self, id_token):
"""
Return encoded ID token.
"""
# Encode the ID token using the `client_secret`.
#
# TODO: Using the `client_secret` is not ideal, since it is transmitted
# over the wire in some authentication flows. A better alternative is
# to use the public key of the issuer, which also allows the ID token to
# be shared among clients. Doing so however adds some operational
# costs. We should consider this for the future.
secret = id_token.access_token.client.client_secret
return id_token.encode(secret)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/views.py
|
AccessTokenView.encode_id_token
|
python
|
def encode_id_token(self, id_token):
# Encode the ID token using the `client_secret`.
#
# TODO: Using the `client_secret` is not ideal, since it is transmitted
# over the wire in some authentication flows. A better alternative is
# to use the public key of the issuer, which also allows the ID token to
# be shared among clients. Doing so however adds some operational
# costs. We should consider this for the future.
secret = id_token.access_token.client.client_secret
return id_token.encode(secret)
|
Return encoded ID token.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/views.py#L155-L171
| null |
class AccessTokenView(provider.oauth2.views.AccessTokenView):
"""
Customized OAuth2 access token view.
Allows usage of email as main identifier when requesting a password grant.
Support the ID Token endpoint following the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint
By default it returns all the claims available to the scope requested,
and available to the claim handlers configured by `OAUTH_OIDC_ID_TOKEN_HANDLERS`
"""
# Add custom authentication provider, to support email as username.
authentication = (provider.oauth2.views.AccessTokenView.authentication +
(PublicPasswordBackend, ))
# The following grant overrides make sure the view uses our customized forms.
# pylint: disable=no-member
def get_authorization_code_grant(self, _request, data, client):
form = AuthorizationCodeGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data.get('grant')
# pylint: disable=no-member
def get_refresh_token_grant(self, _request, data, client):
form = RefreshTokenGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data.get('refresh_token')
# pylint: disable=no-member
def get_password_grant(self, _request, data, client):
# Use customized form to allow use of user email during authentication.
form = PasswordGrantForm(data, client=client)
if not form.is_valid():
raise OAuthError(form.errors)
return form.cleaned_data
# pylint: disable=super-on-old-class
def access_token_response_data(self, access_token, response_type=None, nonce=''):
"""
Return `access_token` fields for OAuth2, and add `id_token` fields for
OpenID Connect according to the `access_token` scope.
"""
# Clear the scope for requests that do not use OpenID Connect.
# Scopes for pure OAuth2 request are currently not supported.
scope = constants.DEFAULT_SCOPE
extra_data = {}
# Add OpenID Connect `id_token` if requested.
#
# TODO: Unfourtunately because of how django-oauth2-provider implements
# scopes, we cannot check if `openid` is the first scope to be
# requested, as required by OpenID Connect specification.
if provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
id_token = self.get_id_token(access_token, nonce)
extra_data['id_token'] = self.encode_id_token(id_token)
scope = provider.scope.to_int(*id_token.scopes)
# Update the token scope, so it includes only authorized values.
access_token.scope = scope
access_token.save()
# Get the main fields for OAuth2 response.
response_data = super(AccessTokenView, self).access_token_response_data(access_token)
# Add any additional fields if OpenID Connect is requested. The order of
# the addition makes sures the OAuth2 values are not overrided.
response_data = dict(extra_data.items() + response_data.items())
return response_data
def get_id_token(self, access_token, nonce):
""" Return an ID token for the given Access Token. """
claims_string = self.request.POST.get('claims')
claims_request = json.loads(claims_string) if claims_string else {}
return oidc.id_token(access_token, nonce, claims_request)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/views.py
|
UserInfoView.get
|
python
|
def get(self, request, *_args, **_kwargs):
access_token = self.access_token
scope_string = request.GET.get('scope')
scope_request = scope_string.split() if scope_string else None
claims_string = request.GET.get('claims')
claims_request = json.loads(claims_string) if claims_string else None
if not provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
return self._bad_request('Missing openid scope.')
try:
claims = self.userinfo_claims(access_token, scope_request, claims_request)
except ValueError, exception:
return self._bad_request(str(exception))
# TODO: Encode and sign responses if requested.
response = JsonResponse(claims)
return response
|
Respond to a UserInfo request.
Two optional query parameters are accepted, scope and claims.
See the references above for more details.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/views.py#L244-L273
| null |
class UserInfoView(ProtectedView):
"""
Implementation of the Basic OpenID Connect UserInfo endpoint as described in:
- http://openid.net/specs/openid-connect-basic-1_0.html#UserInfo
By default it returns all the claims available to the `access_token` used, and available
to the claim handlers configured by `OAUTH_OIDC_USERINFO_HANDLERS`
In addition to the standard UserInfo response, this view also accepts custom scope
and claims requests, using the scope and claims parameters as described in:
http://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims
http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Normally, such requests can only be done when requesting an ID Token. However, it is
also convinient to support then in the UserInfo endpoint to do simply authorization checks.
It ignores the top level claims request for `id_token`, in the claims
request, using only the `userinfo` section.
All requests to this endpoint must include at least the 'openid' scope.
Currently only supports GET request, and does not sign any responses.
"""
def userinfo_claims(self, access_token, scope_request, claims_request):
""" Return the claims for the requested parameters. """
id_token = oidc.userinfo(access_token, scope_request, claims_request)
return id_token.claims
def _bad_request(self, msg):
""" Return a 400 error with JSON content. """
return JsonResponse({'error': msg}, status=400)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/views.py
|
UserInfoView.userinfo_claims
|
python
|
def userinfo_claims(self, access_token, scope_request, claims_request):
id_token = oidc.userinfo(access_token, scope_request, claims_request)
return id_token.claims
|
Return the claims for the requested parameters.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/views.py#L275-L278
|
[
"def userinfo(access_token, scope_request=None, claims_request=None):\n \"\"\"\n Returns data required for an OpenID Connect UserInfo response, according to:\n\n http://openid.net/specs/openid-connect-basic-1_0.html#UserInfoResponse\n\n Supports scope and claims request parameter as described in:\n\n - http://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims\n - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter\n\n Arguments: access_token (:class:`AccessToken`): Associated access\n token. scope_request (list): Optional list of requested\n scopes. Only scopes authorized in the `access_token` will be\n considered. claims_request\n (dict): Optional dictionary with a claims request parameter.\n\n Information on the claims request parameter specification:\n\n - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter\n\n As a convinience, if neither `scope_request` or user_info claim is\n specified in the `claims_request`, it will return the claims for\n all the scopes in the `access_token`.\n\n Returns an :class:`IDToken` instance with the scopes from the\n `scope_request` and the corresponding claims. Claims in the\n `claims_request` paramater userinfo section will be included *in\n addition* to the ones corresponding to `scope_request`.\n\n \"\"\"\n\n handlers = HANDLERS['userinfo']\n\n # Select only the relevant section of the claims request.\n claims_request_section = claims_request.get('userinfo', {}) if claims_request else {}\n\n # If nothing is requested, return the claims for the scopes in the access token.\n if not scope_request and not claims_request_section:\n scope_request = provider.scope.to_names(access_token.scope)\n else:\n scope_request = scope_request\n\n scopes, claims = collect(\n handlers,\n access_token,\n scope_request=scope_request,\n claims_request=claims_request_section,\n )\n\n return IDToken(access_token, scopes, claims)\n"
] |
class UserInfoView(ProtectedView):
"""
Implementation of the Basic OpenID Connect UserInfo endpoint as described in:
- http://openid.net/specs/openid-connect-basic-1_0.html#UserInfo
By default it returns all the claims available to the `access_token` used, and available
to the claim handlers configured by `OAUTH_OIDC_USERINFO_HANDLERS`
In addition to the standard UserInfo response, this view also accepts custom scope
and claims requests, using the scope and claims parameters as described in:
http://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims
http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Normally, such requests can only be done when requesting an ID Token. However, it is
also convinient to support then in the UserInfo endpoint to do simply authorization checks.
It ignores the top level claims request for `id_token`, in the claims
request, using only the `userinfo` section.
All requests to this endpoint must include at least the 'openid' scope.
Currently only supports GET request, and does not sign any responses.
"""
def get(self, request, *_args, **_kwargs):
"""
Respond to a UserInfo request.
Two optional query parameters are accepted, scope and claims.
See the references above for more details.
"""
access_token = self.access_token
scope_string = request.GET.get('scope')
scope_request = scope_string.split() if scope_string else None
claims_string = request.GET.get('claims')
claims_request = json.loads(claims_string) if claims_string else None
if not provider.scope.check(constants.OPEN_ID_SCOPE, access_token.scope):
return self._bad_request('Missing openid scope.')
try:
claims = self.userinfo_claims(access_token, scope_request, claims_request)
except ValueError, exception:
return self._bad_request(str(exception))
# TODO: Encode and sign responses if requested.
response = JsonResponse(claims)
return response
def _bad_request(self, msg):
""" Return a 400 error with JSON content. """
return JsonResponse({'error': msg}, status=400)
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
collect
|
python
|
def collect(handlers, access_token, scope_request=None, claims_request=None):
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instanciated only once, allowing the
# handler to keep state in-between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
# that the handlers determine if a scope is authorized, not its presense in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
# scopes has prevalence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
|
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:AccessToken): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of a
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L19-L81
|
[
"def _collect_scopes(handlers, scopes, user, client):\n \"\"\" Get a set of all the authorized scopes according to the handlers. \"\"\"\n results = set()\n\n data = {'user': user, 'client': client}\n\n def visitor(scope_name, func):\n claim_names = func(data)\n # If the claim_names is None, it means that the scope is not authorized.\n if claim_names is not None:\n results.add(scope_name)\n\n _visit_handlers(handlers, visitor, 'scope', scopes)\n\n return results\n",
"def _collect_names(handlers, scopes, user, client):\n \"\"\" Get the names of the claims supported by the handlers for the requested scope. \"\"\"\n\n results = set()\n\n data = {'user': user, 'client': client}\n\n def visitor(_scope_name, func):\n claim_names = func(data)\n # If the claim_names is None, it means that the scope is not authorized.\n if claim_names is not None:\n results.update(claim_names)\n\n _visit_handlers(handlers, visitor, 'scope', scopes)\n\n return results\n",
"def _validate_claim_request(claims, ignore_errors=False):\n \"\"\"\n Validates a claim request section (`userinfo` or `id_token`) according\n to section 5.5 of the OpenID Connect specification:\n\n - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter\n\n Returns a copy of the claim request with only the valid fields and values.\n\n Raises ValueError is the claim request is invalid and `ignore_errors` is False\n\n \"\"\"\n\n results = {}\n claims = claims if claims else {}\n\n for name, value in claims.iteritems():\n if value is None:\n results[name] = None\n elif isinstance(value, dict):\n results[name] = _validate_claim_values(name, value, ignore_errors)\n else:\n if not ignore_errors:\n msg = 'Invalid claim {}.'.format(name)\n raise ValueError(msg)\n\n return results\n",
"def _collect_values(handlers, names, user, client, values):\n \"\"\" Get the values from the handlers of the requested claims. \"\"\"\n\n results = {}\n\n def visitor(claim_name, func):\n data = {'user': user, 'client': client}\n data.update(values.get(claim_name) or {})\n claim_value = func(data)\n # If the claim_value is None, it means that the claim is not authorized.\n if claim_value is not None:\n # New values overwrite previous results\n results[claim_name] = claim_value\n\n _visit_handlers(handlers, visitor, 'claim', names)\n\n return results\n"
] |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def _collect_scopes(handlers, scopes, user, client):
""" Get a set of all the authorized scopes according to the handlers. """
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_names(handlers, scopes, user, client):
""" Get the names of the claims supported by the handlers for the requested scope. """
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_values(handlers, names, user, client, values):
""" Get the values from the handlers of the requested claims. """
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
def _validate_claim_request(claims, ignore_errors=False):
"""
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError is the claim request is invalid and `ignore_errors` is False
"""
results = {}
claims = claims if claims else {}
for name, value in claims.iteritems():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
def _validate_claim_values(name, value, ignore_errors):
""" Helper for `validate_claim_request` """
results = {'essential': False}
for key, value in value.iteritems():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
def _visit_handlers(handlers, visitor, prefix, suffixes):
""" Use visitor partern to collect information from handlers """
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
_collect_scopes
|
python
|
def _collect_scopes(handlers, scopes, user, client):
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
|
Get a set of all the authorized scopes according to the handlers.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L84-L98
|
[
"def _visit_handlers(handlers, visitor, prefix, suffixes):\n \"\"\" Use visitor partern to collect information from handlers \"\"\"\n\n results = []\n for handler in handlers:\n for suffix in suffixes:\n func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)\n if func:\n results.append(visitor(suffix, func))\n\n return results\n"
] |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def collect(handlers, access_token, scope_request=None, claims_request=None):
"""
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:AccessToken): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of a
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
"""
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instanciated only once, allowing the
# handler to keep state in-between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
# that the handlers determine if a scope is authorized, not its presense in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
# scopes has prevalence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
def _collect_names(handlers, scopes, user, client):
""" Get the names of the claims supported by the handlers for the requested scope. """
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_values(handlers, names, user, client, values):
""" Get the values from the handlers of the requested claims. """
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
def _validate_claim_request(claims, ignore_errors=False):
"""
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError is the claim request is invalid and `ignore_errors` is False
"""
results = {}
claims = claims if claims else {}
for name, value in claims.iteritems():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
def _validate_claim_values(name, value, ignore_errors):
""" Helper for `validate_claim_request` """
results = {'essential': False}
for key, value in value.iteritems():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
def _visit_handlers(handlers, visitor, prefix, suffixes):
""" Use visitor partern to collect information from handlers """
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
_collect_names
|
python
|
def _collect_names(handlers, scopes, user, client):
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
|
Get the names of the claims supported by the handlers for the requested scope.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L101-L116
|
[
"def _visit_handlers(handlers, visitor, prefix, suffixes):\n \"\"\" Use visitor partern to collect information from handlers \"\"\"\n\n results = []\n for handler in handlers:\n for suffix in suffixes:\n func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)\n if func:\n results.append(visitor(suffix, func))\n\n return results\n"
] |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def collect(handlers, access_token, scope_request=None, claims_request=None):
"""
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:AccessToken): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of a
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
"""
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instanciated only once, allowing the
# handler to keep state in-between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
# that the handlers determine if a scope is authorized, not its presense in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
# scopes has prevalence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
def _collect_scopes(handlers, scopes, user, client):
""" Get a set of all the authorized scopes according to the handlers. """
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_values(handlers, names, user, client, values):
""" Get the values from the handlers of the requested claims. """
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
def _validate_claim_request(claims, ignore_errors=False):
"""
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError is the claim request is invalid and `ignore_errors` is False
"""
results = {}
claims = claims if claims else {}
for name, value in claims.iteritems():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
def _validate_claim_values(name, value, ignore_errors):
""" Helper for `validate_claim_request` """
results = {'essential': False}
for key, value in value.iteritems():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
def _visit_handlers(handlers, visitor, prefix, suffixes):
""" Use visitor partern to collect information from handlers """
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
_collect_values
|
python
|
def _collect_values(handlers, names, user, client, values):
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
|
Get the values from the handlers of the requested claims.
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L119-L135
|
[
"def _visit_handlers(handlers, visitor, prefix, suffixes):\n \"\"\" Use visitor partern to collect information from handlers \"\"\"\n\n results = []\n for handler in handlers:\n for suffix in suffixes:\n func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)\n if func:\n results.append(visitor(suffix, func))\n\n return results\n"
] |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def collect(handlers, access_token, scope_request=None, claims_request=None):
"""
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:AccessToken): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of a
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
"""
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instanciated only once, allowing the
# handler to keep state in-between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
# that the handlers determine if a scope is authorized, not its presense in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
# scopes has prevalence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
def _collect_scopes(handlers, scopes, user, client):
""" Get a set of all the authorized scopes according to the handlers. """
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_names(handlers, scopes, user, client):
""" Get the names of the claims supported by the handlers for the requested scope. """
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _validate_claim_request(claims, ignore_errors=False):
"""
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError is the claim request is invalid and `ignore_errors` is False
"""
results = {}
claims = claims if claims else {}
for name, value in claims.iteritems():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
def _validate_claim_values(name, value, ignore_errors):
""" Helper for `validate_claim_request` """
results = {'essential': False}
for key, value in value.iteritems():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
def _visit_handlers(handlers, visitor, prefix, suffixes):
""" Use visitor partern to collect information from handlers """
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
_validate_claim_request
|
python
|
def _validate_claim_request(claims, ignore_errors=False):
results = {}
claims = claims if claims else {}
for name, value in claims.iteritems():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
|
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError is the claim request is invalid and `ignore_errors` is False
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L138-L164
|
[
"def _validate_claim_values(name, value, ignore_errors):\n \"\"\" Helper for `validate_claim_request` \"\"\"\n results = {'essential': False}\n for key, value in value.iteritems():\n if key in CLAIM_REQUEST_FIELDS:\n results[key] = value\n else:\n if not ignore_errors:\n msg = 'Unknown attribute {} in claim value {}.'.format(key, name)\n raise ValueError(msg)\n return results\n"
] |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def collect(handlers, access_token, scope_request=None, claims_request=None):
"""
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:AccessToken): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of a
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
"""
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instanciated only once, allowing the
# handler to keep state in-between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
# that the handlers determine if a scope is authorized, not its presense in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
# scopes has prevalence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
def _collect_scopes(handlers, scopes, user, client):
    """Return the set of scope names that the handlers authorize.

    A scope counts as authorized when its handler method returns a
    non-None value (the list of claim names it supports).
    """
    authorized = set()
    context = {'user': user, 'client': client}

    def record_if_authorized(scope_name, handler_method):
        # A None result from the handler means the scope is not authorized.
        if handler_method(context) is not None:
            authorized.add(scope_name)

    _visit_handlers(handlers, record_if_authorized, 'scope', scopes)
    return authorized
def _collect_names(handlers, scopes, user, client):
    """Return the union of claim names the handlers support for `scopes`."""
    names = set()
    context = {'user': user, 'client': client}

    def gather(_scope_name, handler_method):
        supported = handler_method(context)
        # None signals an unauthorized scope; it contributes no claim names.
        if supported is not None:
            names.update(supported)

    _visit_handlers(handlers, gather, 'scope', scopes)
    return names
def _collect_values(handlers, names, user, client, values):
    """Return a dict mapping claim names to the values the handlers produce."""
    collected = {}

    def evaluate(claim_name, handler_method):
        # Build a fresh context per claim, merging any per-claim request data.
        context = {'user': user, 'client': client}
        context.update(values.get(claim_name) or {})
        result = handler_method(context)
        # A None result means the claim is not authorized.  When several
        # handlers provide the same claim, later results overwrite earlier ones.
        if result is not None:
            collected[claim_name] = result

    _visit_handlers(handlers, evaluate, 'claim', names)
    return collected
def _validate_claim_values(name, value, ignore_errors):
    """Helper for `_validate_claim_request`: validate one claim's attributes.

    Keeps only the fields listed in CLAIM_REQUEST_FIELDS, defaulting
    `essential` to False.  Raises ValueError on an unknown attribute
    unless `ignore_errors` is True.
    """
    validated = {'essential': False}
    # Use distinct loop names instead of shadowing the `value` parameter.
    for field, field_value in value.iteritems():
        if field in CLAIM_REQUEST_FIELDS:
            validated[field] = field_value
        elif not ignore_errors:
            msg = 'Unknown attribute {} in claim value {}.'.format(field, name)
            raise ValueError(msg)
    return validated
def _visit_handlers(handlers, visitor, prefix, suffixes):
""" Use visitor partern to collect information from handlers """
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
_validate_claim_values
|
python
|
def _validate_claim_values(name, value, ignore_errors):
results = {'essential': False}
for key, value in value.iteritems():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
|
Helper for `validate_claim_request`
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L167-L177
| null |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def collect(handlers, access_token, scope_request=None, claims_request=None):
    """
    Collect all the claims values from the `handlers`.

    Arguments:
        handlers (list): List of claim :class:`Handler` classes.
        access_token (:class:`AccessToken`): Associated access token.
        scope_request (list): List of requested scopes.
        claims_request (dict): Dictionary with only the relevant section of a
            OpenID Connect claims request.

    Returns a list of the scopes from `scope_request` that are authorized, and
    a dictionary of the claims associated with the authorized scopes in
    `scope_request`, and additionally, the authorized claims listed in
    `claims_request`.
    """
    user = access_token.user
    client = access_token.client
    # Instantiate handlers. Each handler is instantiated only once, allowing
    # the handler to keep state in-between calls to its scope and claim
    # methods.
    handlers = [cls() for cls in handlers]
    # Find all authorized scopes by including the access_token scopes. Note
    # that the handlers determine if a scope is authorized, not its presence
    # in the access_token.
    required_scopes = set(REQUIRED_SCOPES)
    token_scopes = set(provider.scope.to_names(access_token.scope))
    authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
    # Select only the authorized scopes from the requested scopes; the
    # required scopes (e.g. `openid`) are always included.
    scope_request = set(scope_request) if scope_request else set()
    scopes = required_scopes | (authorized_scopes & scope_request)
    # Find all authorized claim names for the authorized_scopes.
    authorized_names = _collect_names(handlers, authorized_scopes, user, client)
    # Select only the requested claims if no scope has been requested.
    # Selecting scopes has prevalence over selecting claims.
    claims_request = _validate_claim_request(claims_request)
    # Add the requested claims that are authorized to the response.
    requested_names = set(claims_request.keys()) & authorized_names
    names = _collect_names(handlers, scopes, user, client) | requested_names
    # Get the values for the claims, passing any per-claim request data.
    claims = _collect_values(
        handlers,
        names=names,
        user=user,
        client=client,
        values=claims_request or {}
    )
    return authorized_scopes, claims
def _collect_scopes(handlers, scopes, user, client):
""" Get a set of all the authorized scopes according to the handlers. """
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_names(handlers, scopes, user, client):
""" Get the names of the claims supported by the handlers for the requested scope. """
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_values(handlers, names, user, client, values):
""" Get the values from the handlers of the requested claims. """
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
def _validate_claim_request(claims, ignore_errors=False):
    """Validate a claim request section (`userinfo` or `id_token`).

    Follows section 5.5 of the OpenID Connect specification:

    - http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter

    Returns a copy of the claim request with only the valid fields and
    values.  Raises ValueError if the claim request is invalid and
    `ignore_errors` is False.
    """
    validated = {}
    for name, value in (claims or {}).iteritems():
        if value is None:
            validated[name] = None
        elif isinstance(value, dict):
            validated[name] = _validate_claim_values(name, value, ignore_errors)
        elif not ignore_errors:
            raise ValueError('Invalid claim {}.'.format(name))
    return validated
def _visit_handlers(handlers, visitor, prefix, suffixes):
""" Use visitor partern to collect information from handlers """
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
edx/edx-oauth2-provider
|
edx_oauth2_provider/oidc/collect.py
|
_visit_handlers
|
python
|
def _visit_handlers(handlers, visitor, prefix, suffixes):
results = []
for handler in handlers:
for suffix in suffixes:
func = getattr(handler, '{}_{}'.format(prefix, suffix).lower(), None)
if func:
results.append(visitor(suffix, func))
return results
|
Use the visitor pattern to collect information from handlers
|
train
|
https://github.com/edx/edx-oauth2-provider/blob/73e7569a8369e74c345022ccba634365e24befab/edx_oauth2_provider/oidc/collect.py#L180-L190
| null |
"""
Functions to collect OpenID Connect values from claim handlers.
For details on the format of the claim handlers, see
:mod:`oauth2_provider.oicd.handlers`
None: The functions in this module assume the `openid` scope is implied.
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import, division, print_function, unicode_literals
import provider.scope
REQUIRED_SCOPES = ['openid']
CLAIM_REQUEST_FIELDS = ['value', 'values', 'essential']
def collect(handlers, access_token, scope_request=None, claims_request=None):
"""
Collect all the claims values from the `handlers`.
Arguments:
handlers (list): List of claim :class:`Handler` classes.
access_token (:class:AccessToken): Associated access token.
scope_request (list): List of requested scopes.
claims_request (dict): Dictionary with only the relevant section of a
OpenID Connect claims request.
Returns a list of the scopes from `scope_request` that are authorized, and a
dictionary of the claims associated with the authorized scopes in
`scope_request`, and additionally, the authorized claims listed in
`claims_request`.
"""
user = access_token.user
client = access_token.client
# Instantiate handlers. Each handler is instanciated only once, allowing the
# handler to keep state in-between calls to its scope and claim methods.
handlers = [cls() for cls in handlers]
# Find all authorized scopes by including the access_token scopes. Note
# that the handlers determine if a scope is authorized, not its presense in
# the access_token.
required_scopes = set(REQUIRED_SCOPES)
token_scopes = set(provider.scope.to_names(access_token.scope))
authorized_scopes = _collect_scopes(handlers, required_scopes | token_scopes, user, client)
# Select only the authorized scopes from the requested scopes.
scope_request = set(scope_request) if scope_request else set()
scopes = required_scopes | (authorized_scopes & scope_request)
# Find all authorized claims names for the authorized_scopes.
authorized_names = _collect_names(handlers, authorized_scopes, user, client)
# Select only the requested claims if no scope has been requested. Selecting
# scopes has prevalence over selecting claims.
claims_request = _validate_claim_request(claims_request)
# Add the requested claims that are authorized to the response.
requested_names = set(claims_request.keys()) & authorized_names
names = _collect_names(handlers, scopes, user, client) | requested_names
# Get the values for the claims.
claims = _collect_values(
handlers,
names=names,
user=user,
client=client,
values=claims_request or {}
)
return authorized_scopes, claims
def _collect_scopes(handlers, scopes, user, client):
""" Get a set of all the authorized scopes according to the handlers. """
results = set()
data = {'user': user, 'client': client}
def visitor(scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.add(scope_name)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_names(handlers, scopes, user, client):
""" Get the names of the claims supported by the handlers for the requested scope. """
results = set()
data = {'user': user, 'client': client}
def visitor(_scope_name, func):
claim_names = func(data)
# If the claim_names is None, it means that the scope is not authorized.
if claim_names is not None:
results.update(claim_names)
_visit_handlers(handlers, visitor, 'scope', scopes)
return results
def _collect_values(handlers, names, user, client, values):
""" Get the values from the handlers of the requested claims. """
results = {}
def visitor(claim_name, func):
data = {'user': user, 'client': client}
data.update(values.get(claim_name) or {})
claim_value = func(data)
# If the claim_value is None, it means that the claim is not authorized.
if claim_value is not None:
# New values overwrite previous results
results[claim_name] = claim_value
_visit_handlers(handlers, visitor, 'claim', names)
return results
def _validate_claim_request(claims, ignore_errors=False):
"""
Validates a claim request section (`userinfo` or `id_token`) according
to section 5.5 of the OpenID Connect specification:
- http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
Returns a copy of the claim request with only the valid fields and values.
Raises ValueError is the claim request is invalid and `ignore_errors` is False
"""
results = {}
claims = claims if claims else {}
for name, value in claims.iteritems():
if value is None:
results[name] = None
elif isinstance(value, dict):
results[name] = _validate_claim_values(name, value, ignore_errors)
else:
if not ignore_errors:
msg = 'Invalid claim {}.'.format(name)
raise ValueError(msg)
return results
def _validate_claim_values(name, value, ignore_errors):
""" Helper for `validate_claim_request` """
results = {'essential': False}
for key, value in value.iteritems():
if key in CLAIM_REQUEST_FIELDS:
results[key] = value
else:
if not ignore_errors:
msg = 'Unknown attribute {} in claim value {}.'.format(key, name)
raise ValueError(msg)
return results
|
eight04/pyAPNG
|
apng/__init__.py
|
parse_chunks
|
python
|
def parse_chunks(b):
# skip signature
i = 8
# yield chunks
while i < len(b):
data_len, = struct.unpack("!I", b[i:i+4])
type_ = b[i+4:i+8].decode("latin-1")
yield Chunk(type_, b[i:i+data_len+12])
i += data_len + 12
|
Parse PNG bytes into multiple chunks.
:arg bytes b: The raw bytes of the PNG file.
:return: A generator yielding :class:`Chunk`.
:rtype: Iterator[Chunk]
|
train
|
https://github.com/eight04/pyAPNG/blob/b4d2927f7892a1de967b5cf57d434ed65f6a017e/apng/__init__.py#L27-L41
| null |
#! python3
"""This is an APNG module, which can create apng file from pngs
Reference:
http://littlesvr.ca/apng/
http://wiki.mozilla.org/APNG_Specification
https://www.w3.org/TR/PNG/
"""
import struct
import binascii
import io
import zlib
from collections import namedtuple
__version__ = "0.3.3"
PNG_SIGN = b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"
# http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.Summary-of-standard-chunks
CHUNK_BEFORE_IDAT = {
"cHRM", "gAMA", "iCCP", "sBIT", "sRGB", "bKGD", "hIST", "tRNS", "pHYs",
"sPLT", "tIME", "PLTE"
}
def parse_chunks(b):
    """Parse PNG bytes into multiple chunks.

    :arg bytes b: The raw bytes of the PNG file.
    :return: A generator yielding :class:`Chunk`.
    :rtype: Iterator[Chunk]
    """
    pos = 8  # skip the 8-byte PNG signature
    total = len(b)
    while pos < total:
        (length,) = struct.unpack("!I", b[pos:pos + 4])
        chunk_type = b[pos + 4:pos + 8].decode("latin-1")
        # A raw chunk spans 4-byte length + 4-byte type + data + 4-byte CRC.
        end = pos + length + 12
        yield Chunk(chunk_type, b[pos:end])
        pos = end
def make_chunk(chunk_type, chunk_data):
    """Compose a raw PNG chunk from its type and data.

    The 4-byte big-endian length field and the CRC over type+data are
    computed for you.

    :arg str chunk_type: PNG chunk type.
    :arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.
    :rtype: bytes
    """
    payload = chunk_type.encode("latin-1") + chunk_data
    # CRC covers the type and data fields only, never the length field.
    crc = binascii.crc32(payload) & 0xffffffff
    return struct.pack("!I", len(chunk_data)) + payload + struct.pack("!I", crc)
def make_text_chunk(
        type="tEXt", key="Comment", value="",
        compression_flag=0, compression_method=0, lang="", translated_key=""):
    """Create a text chunk with a key value pair.

    See https://www.w3.org/TR/PNG/#11textinfo for text chunk information.

    Usage:

    .. code:: python

        from apng import APNG, make_text_chunk
        im = APNG.open("file.png")
        png, control = im.frames[0]
        png.chunks.append(make_text_chunk("tEXt", "Comment", "some text"))
        im.save("file.png")

    :arg str type: Text chunk type: "tEXt", "zTXt", or "iTXt":

        tEXt uses Latin-1 characters.
        zTXt uses Latin-1 characters, compressed with zlib.
        iTXt uses UTF-8 characters.

    :arg str key: The key string, 1-79 characters.
    :arg str value: The text value. It would be encoded into
        :class:`bytes` and compressed if needed.
    :arg int compression_flag: The compression flag for iTXt.
    :arg int compression_method: The compression method for zTXt and iTXt.
    :arg str lang: The language tag for iTXt.
    :arg str translated_key: The translated keyword for iTXt.
    :rtype: Chunk
    """
    # pylint: disable=redefined-builtin
    if type == "tEXt":
        data = key.encode("latin-1") + b"\0" + value.encode("latin-1")
    elif type == "zTXt":
        # "!xb" packs the null keyword terminator followed by the
        # compression-method byte required by the zTXt layout.
        data = (
            key.encode("latin-1") + struct.pack("!xb", compression_method) +
            zlib.compress(value.encode("latin-1"))
        )
    elif type == "iTXt":
        # "!xbb" packs the null terminator, compression flag, and
        # compression method of the iTXt layout.
        data = (
            key.encode("latin-1") +
            struct.pack("!xbb", compression_flag, compression_method) +
            lang.encode("latin-1") + b"\0" +
            translated_key.encode("utf-8") + b"\0"
        )
        if compression_flag:
            data += zlib.compress(value.encode("utf-8"))
        else:
            data += value.encode("utf-8")
    else:
        raise TypeError("unknown type {!r}".format(type))
    return Chunk(type, make_chunk(type, data))
def read_file(file):
    """Read ``file`` into ``bytes``.

    :arg file: path-like or file-like
    :rtype: bytes
    """
    # File-like objects take priority over pathlib-style objects.
    reader = getattr(file, "read", None)
    if reader is not None:
        return reader()
    bytes_reader = getattr(file, "read_bytes", None)
    if bytes_reader is not None:
        return bytes_reader()
    with open(file, "rb") as f:
        return f.read()
def write_file(file, b):
    """Write ``b`` to file ``file``.

    :arg file: path-like or file-like object.
    :arg bytes b: The content.
    """
    # pathlib-style objects (write_bytes) take priority over plain
    # file-like objects (write); anything else is treated as a path.
    path_writer = getattr(file, "write_bytes", None)
    if path_writer is not None:
        path_writer(b)
        return
    stream_writer = getattr(file, "write", None)
    if stream_writer is not None:
        stream_writer(b)
        return
    with open(file, "wb") as f:
        f.write(b)
def open_file(file, mode):
    """Open ``file`` if needed and return a file object.

    Already-open file-like objects are returned unchanged.

    :arg file: file-like or path-like object.
    :arg str mode: ``mode`` argument for :func:`open`.
    """
    if hasattr(file, "read"):
        return file
    opener = getattr(file, "open", None)
    if opener is not None:
        return opener(mode)
    return open(file, mode)
def file_to_png(fp):
    """Convert an image to PNG format with Pillow.

    Raises ImportError if Pillow is not installed.

    :arg file-like fp: The image file.
    :rtype: bytes
    """
    import PIL.Image  # pylint: disable=import-error
    # Re-encode through Pillow; optimize=True lets the encoder pick the
    # smallest filter/compression settings.
    with io.BytesIO() as dest:
        PIL.Image.open(fp).save(dest, "PNG", optimize=True)
        return dest.getvalue()
class Chunk(namedtuple("Chunk", ["type", "data"])):
    """A namedtuple representing one raw PNG chunk.

    :arg str type: The chunk type (e.g. "IHDR", "IDAT").
    :arg bytes data: The raw bytes of the chunk, including chunk length,
        type, data, and CRC.
    """
    pass
class PNG:
    """Represent a single (non-animated) PNG image.
    """
    def __init__(self):
        # Raw IHDR chunk bytes; filled in by :meth:`init`.
        self.hdr = None
        # Raw IEND chunk bytes; filled in by :meth:`init`.
        self.end = None
        # Image dimensions parsed from IHDR; filled in by :meth:`init`.
        self.width = None
        self.height = None
        self.chunks = []
        """A list of :class:`Chunk`. After reading a PNG file, the bytes
        are parsed into multiple chunks. You can remove/add chunks into
        this array before calling :func:`to_bytes`."""
    def init(self):
        """Extract some info (IHDR, IEND, width, height) from the chunks."""
        for type_, data in self.chunks:
            if type_ == "IHDR":
                self.hdr = data
            elif type_ == "IEND":
                self.end = data
        if self.hdr:
            # Grab width/height: two big-endian uint32s right after the
            # 8-byte chunk header (length + type) of IHDR.
            self.width, self.height = struct.unpack("!II", self.hdr[8:16])
    @classmethod
    def open(cls, file):
        """Open a PNG file.

        :arg file: Input file.
        :type file: path-like or file-like
        :rtype: :class:`PNG`
        """
        return cls.from_bytes(read_file(file))
    @classmethod
    def open_any(cls, file):
        """Open an image file. If the image is not PNG format, it would convert
        the image into PNG with Pillow module. If the module is not
        installed, :class:`ImportError` would be raised.

        :arg file: Input file.
        :type file: path-like or file-like
        :rtype: :class:`PNG`
        """
        with open_file(file, "rb") as f:
            # Sniff the 8-byte PNG signature to decide whether conversion
            # through Pillow is needed.
            header = f.read(8)
            f.seek(0)
            if header != PNG_SIGN:
                b = file_to_png(f)
            else:
                b = f.read()
        return cls.from_bytes(b)
    @classmethod
    def from_bytes(cls, b):
        """Create :class:`PNG` from raw bytes.

        :arg bytes b: The raw bytes of the PNG file.
        :rtype: :class:`PNG`
        """
        im = cls()
        im.chunks = list(parse_chunks(b))
        im.init()
        return im
    @classmethod
    def from_chunks(cls, chunks):
        """Construct PNG from raw chunks.

        :arg chunks: A list of ``(chunk_type, chunk_raw_data)``. Also see
            :func:`chunks`.
        :type chunks: list[tuple(str, bytes)]
        """
        im = cls()
        im.chunks = chunks
        im.init()
        return im
    def to_bytes(self):
        """Convert the entire image to bytes.

        :rtype: bytes
        """
        # PNG signature followed by every raw chunk, in order.
        chunks = [PNG_SIGN]
        chunks.extend(c[1] for c in self.chunks)
        return b"".join(chunks)
    def save(self, file):
        """Save the entire image to a file.

        :arg file: Output file.
        :type file: path-like or file-like
        """
        write_file(file, self.to_bytes())
class FrameControl:
    """A data class holding fcTL (frame control) info."""
    def __init__(self, width=None, height=None, x_offset=0, y_offset=0,
                 delay=100, delay_den=1000, depose_op=1, blend_op=0):
        """Parameters are assigned as object members. See
        `https://wiki.mozilla.org/APNG_Specification
        <https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk>`_
        for the detail of fcTL.
        """
        self.width = width
        self.height = height
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.delay = delay
        self.delay_den = delay_den
        self.depose_op = depose_op
        self.blend_op = blend_op
    def to_bytes(self):
        """Convert to bytes (fcTL payload, excluding sequence number and CRC).

        :rtype: bytes
        """
        return struct.pack(
            "!IIIIHHbb", self.width, self.height, self.x_offset, self.y_offset,
            self.delay, self.delay_den, self.depose_op, self.blend_op
        )
    @classmethod
    def from_bytes(cls, b):
        """Construct fcTL info from bytes.

        :arg bytes b: The length of ``b`` must be *22* (the size of the
            ``"!IIIIHHbb"`` struct format), excluding sequence number and CRC.
        """
        return cls(*struct.unpack("!IIIIHHbb", b))
class APNG:
"""Represent an APNG image."""
def __init__(self, num_plays=0):
"""An :class:`APNG` is composed by multiple :class:`PNG` s and
:class:`FrameControl`, which can be inserted with :meth:`append`.
:arg int num_plays: Number of times to loop. 0 = infinite.
:var frames: The frames of APNG.
:vartype frames: list[tuple(PNG, FrameControl)]
:var int num_plays: same as ``num_plays``.
"""
self.frames = []
self.num_plays = num_plays
def append(self, png, **options):
"""Append one frame.
:arg PNG png: Append a :class:`PNG` as a frame.
:arg dict options: The options for :class:`FrameControl`.
"""
if not isinstance(png, PNG):
raise TypeError("Expect an instance of `PNG` but got `{}`".format(png))
control = FrameControl(**options)
if control.width is None:
control.width = png.width
if control.height is None:
control.height = png.height
self.frames.append((png, control))
def append_file(self, file, **options):
"""Create a PNG from file and append the PNG as a frame.
:arg file: Input file.
:type file: path-like or file-like.
:arg dict options: The options for :class:`FrameControl`.
"""
self.append(PNG.open_any(file), **options)
def to_bytes(self):
"""Convert the entire image to bytes.
:rtype: bytes
"""
# grab the chunks we needs
out = [PNG_SIGN]
# FIXME: it's tricky to define "other_chunks". HoneyView stop the
# animation if it sees chunks other than fctl or idat, so we put other
# chunks to the end of the file
other_chunks = []
seq = 0
# for first frame
png, control = self.frames[0]
# header
out.append(png.hdr)
# acTL
out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays)))
# fcTL
if control:
out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()))
seq += 1
# and others...
idat_chunks = []
for type_, data in png.chunks:
if type_ in ("IHDR", "IEND"):
continue
if type_ == "IDAT":
# put at last
idat_chunks.append(data)
continue
out.append(data)
out.extend(idat_chunks)
# FIXME: we should do some optimization to frames...
# for other frames
for png, control in self.frames[1:]:
# fcTL
out.append(
make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())
)
seq += 1
# and others...
for type_, data in png.chunks:
if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT:
continue
elif type_ == "IDAT":
# convert IDAT to fdAT
out.append(
make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4])
)
seq += 1
else:
other_chunks.append(data)
# end
out.extend(other_chunks)
out.append(png.end)
return b"".join(out)
@classmethod
def from_files(cls, files, **options):
"""Create an APNG from multiple files.
This is a shortcut of::
im = APNG()
for file in files:
im.append_file(file, **options)
:arg list files: A list of filename. See :meth:`PNG.open`.
:arg dict options: Options for :class:`FrameControl`.
:rtype: APNG
"""
im = cls()
for file in files:
im.append_file(file, **options)
return im
@classmethod
def from_bytes(cls, b):
"""Create an APNG from raw bytes.
:arg bytes b: The raw bytes of the APNG file.
:rtype: APNG
"""
hdr = None
head_chunks = []
end = ("IEND", make_chunk("IEND", b""))
frame_chunks = []
frames = []
num_plays = 0
frame_has_head_chunks = False
control = None
for type_, data in parse_chunks(b):
if type_ == "IHDR":
hdr = data
frame_chunks.append((type_, data))
elif type_ == "acTL":
_num_frames, num_plays = struct.unpack("!II", data[8:-4])
continue
elif type_ == "fcTL":
if any(type_ == "IDAT" for type_, data in frame_chunks):
# IDAT inside chunk, go to next frame
frame_chunks.append(end)
frames.append((PNG.from_chunks(frame_chunks), control))
frame_has_head_chunks = False
control = FrameControl.from_bytes(data[12:-4])
# https://github.com/PyCQA/pylint/issues/2072
# pylint: disable=typecheck
hdr = make_chunk("IHDR", struct.pack("!II", control.width, control.height) + hdr[16:-4])
frame_chunks = [("IHDR", hdr)]
else:
control = FrameControl.from_bytes(data[12:-4])
elif type_ == "IDAT":
if not frame_has_head_chunks:
frame_chunks.extend(head_chunks)
frame_has_head_chunks = True
frame_chunks.append((type_, data))
elif type_ == "fdAT":
# convert to IDAT
if not frame_has_head_chunks:
frame_chunks.extend(head_chunks)
frame_has_head_chunks = True
frame_chunks.append(("IDAT", make_chunk("IDAT", data[12:-4])))
elif type_ == "IEND":
# end
frame_chunks.append(end)
frames.append((PNG.from_chunks(frame_chunks), control))
break
elif type_ in CHUNK_BEFORE_IDAT:
head_chunks.append((type_, data))
else:
frame_chunks.append((type_, data))
o = cls()
o.frames = frames
o.num_plays = num_plays
return o
@classmethod
def open(cls, file):
"""Open an APNG file.
:arg file: Input file.
:type file: path-like or file-like.
:rtype: APNG
"""
return cls.from_bytes(read_file(file))
def save(self, file):
"""Save the entire image to a file.
:arg file: Output file.
:type file: path-like or file-like
"""
write_file(file, self.to_bytes())
|
eight04/pyAPNG
|
apng/__init__.py
|
make_chunk
|
python
|
def make_chunk(chunk_type, chunk_data):
out = struct.pack("!I", len(chunk_data))
chunk_data = chunk_type.encode("latin-1") + chunk_data
out += chunk_data + struct.pack("!I", binascii.crc32(chunk_data) & 0xffffffff)
return out
|
Create a raw chunk by composing chunk type and data. It
calculates chunk length and CRC for you.
:arg str chunk_type: PNG chunk type.
:arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.
:rtype: bytes
|
train
|
https://github.com/eight04/pyAPNG/blob/b4d2927f7892a1de967b5cf57d434ed65f6a017e/apng/__init__.py#L43-L54
| null |
#! python3
"""This is an APNG module, which can create apng file from pngs
Reference:
http://littlesvr.ca/apng/
http://wiki.mozilla.org/APNG_Specification
https://www.w3.org/TR/PNG/
"""
import struct
import binascii
import io
import zlib
from collections import namedtuple
__version__ = "0.3.3"
PNG_SIGN = b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"
# http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.Summary-of-standard-chunks
CHUNK_BEFORE_IDAT = {
"cHRM", "gAMA", "iCCP", "sBIT", "sRGB", "bKGD", "hIST", "tRNS", "pHYs",
"sPLT", "tIME", "PLTE"
}
def parse_chunks(b):
"""Parse PNG bytes into multiple chunks.
:arg bytes b: The raw bytes of the PNG file.
:return: A generator yielding :class:`Chunk`.
:rtype: Iterator[Chunk]
"""
# skip signature
i = 8
# yield chunks
while i < len(b):
data_len, = struct.unpack("!I", b[i:i+4])
type_ = b[i+4:i+8].decode("latin-1")
yield Chunk(type_, b[i:i+data_len+12])
i += data_len + 12
def make_text_chunk(
type="tEXt", key="Comment", value="",
compression_flag=0, compression_method=0, lang="", translated_key=""):
"""Create a text chunk with a key value pair.
See https://www.w3.org/TR/PNG/#11textinfo for text chunk information.
Usage:
.. code:: python
from apng import APNG, make_text_chunk
im = APNG.open("file.png")
png, control = im.frames[0]
png.chunks.append(make_text_chunk("tEXt", "Comment", "some text"))
im.save("file.png")
:arg str type: Text chunk type: "tEXt", "zTXt", or "iTXt":
tEXt uses Latin-1 characters.
zTXt uses Latin-1 characters, compressed with zlib.
iTXt uses UTF-8 characters.
:arg str key: The key string, 1-79 characters.
:arg str value: The text value. It would be encoded into
:class:`bytes` and compressed if needed.
:arg int compression_flag: The compression flag for iTXt.
:arg int compression_method: The compression method for zTXt and iTXt.
:arg str lang: The language tag for iTXt.
:arg str translated_key: The translated keyword for iTXt.
:rtype: Chunk
"""
# pylint: disable=redefined-builtin
if type == "tEXt":
data = key.encode("latin-1") + b"\0" + value.encode("latin-1")
elif type == "zTXt":
data = (
key.encode("latin-1") + struct.pack("!xb", compression_method) +
zlib.compress(value.encode("latin-1"))
)
elif type == "iTXt":
data = (
key.encode("latin-1") +
struct.pack("!xbb", compression_flag, compression_method) +
lang.encode("latin-1") + b"\0" +
translated_key.encode("utf-8") + b"\0"
)
if compression_flag:
data += zlib.compress(value.encode("utf-8"))
else:
data += value.encode("utf-8")
else:
raise TypeError("unknown type {!r}".format(type))
return Chunk(type, make_chunk(type, data))
def read_file(file):
"""Read ``file`` into ``bytes``.
:arg file type: path-like or file-like
:rtype: bytes
"""
if hasattr(file, "read"):
return file.read()
if hasattr(file, "read_bytes"):
return file.read_bytes()
with open(file, "rb") as f:
return f.read()
def write_file(file, b):
"""Write ``b`` to file ``file``.
:arg file type: path-like or file-like object.
:arg bytes b: The content.
"""
if hasattr(file, "write_bytes"):
file.write_bytes(b)
elif hasattr(file, "write"):
file.write(b)
else:
with open(file, "wb") as f:
f.write(b)
def open_file(file, mode):
"""Open a file.
:arg file: file-like or path-like object.
:arg str mode: ``mode`` argument for :func:`open`.
"""
if hasattr(file, "read"):
return file
if hasattr(file, "open"):
return file.open(mode)
return open(file, mode)
def file_to_png(fp):
"""Convert an image to PNG format with Pillow.
:arg file-like fp: The image file.
:rtype: bytes
"""
import PIL.Image # pylint: disable=import-error
with io.BytesIO() as dest:
PIL.Image.open(fp).save(dest, "PNG", optimize=True)
return dest.getvalue()
class Chunk(namedtuple("Chunk", ["type", "data"])):
"""A namedtuple to represent the PNG chunk.
:arg str type: The chunk type.
:arg bytes data: The raw bytes of the chunk, including chunk length, type,
data, and CRC.
"""
pass
class PNG:
    """Represent a PNG image.

    The image is kept as a list of raw chunks in :attr:`chunks`; the
    IHDR/IEND chunks and the pixel size are cached by :meth:`init`.
    """
    def __init__(self):
        # Raw IHDR chunk bytes (length + type + data + CRC); set by init().
        self.hdr = None
        # Raw IEND chunk bytes; set by init().
        self.end = None
        # Pixel dimensions decoded from IHDR; None until init() runs.
        self.width = None
        self.height = None
        self.chunks = []
        """A list of :class:`Chunk`. After reading a PNG file, the bytes
        are parsed into multiple chunks. You can remove/add chunks into
        this array before calling :func:`to_bytes`."""
    def init(self):
        """Extract some info from chunks"""
        for type_, data in self.chunks:
            if type_ == "IHDR":
                self.hdr = data
            elif type_ == "IEND":
                self.end = data
        if self.hdr:
            # grab w, h info
            # IHDR layout: 4-byte length, 4-byte type, then width and
            # height as big-endian uint32 at byte offsets 8..16.
            self.width, self.height = struct.unpack("!II", self.hdr[8:16])
    @classmethod
    def open(cls, file):
        """Open a PNG file.

        :arg file: Input file.
        :type file: path-like or file-like
        :rtype: :class:`PNG`
        """
        return cls.from_bytes(read_file(file))
    @classmethod
    def open_any(cls, file):
        """Open an image file. If the image is not PNG format, it would convert
        the image into PNG with Pillow module. If the module is not
        installed, :class:`ImportError` would be raised.

        :arg file: Input file.
        :type file: path-like or file-like
        :rtype: :class:`PNG`
        """
        with open_file(file, "rb") as f:
            header = f.read(8)
            # rewind so the whole stream is re-read below
            f.seek(0)
            if header != PNG_SIGN:
                b = file_to_png(f)
            else:
                b = f.read()
        return cls.from_bytes(b)
    @classmethod
    def from_bytes(cls, b):
        """Create :class:`PNG` from raw bytes.

        :arg bytes b: The raw bytes of the PNG file.
        :rtype: :class:`PNG`
        """
        im = cls()
        im.chunks = list(parse_chunks(b))
        im.init()
        return im
    @classmethod
    def from_chunks(cls, chunks):
        """Construct PNG from raw chunks.

        :arg chunks: A list of ``(chunk_type, chunk_raw_data)``. Also see
            :attr:`chunks`.
        :type chunks: list[tuple(str, bytes)]
        """
        im = cls()
        im.chunks = chunks
        im.init()
        return im
    def to_bytes(self):
        """Convert the entire image to bytes.

        :rtype: bytes
        """
        chunks = [PNG_SIGN]
        chunks.extend(c[1] for c in self.chunks)
        return b"".join(chunks)
    def save(self, file):
        """Save the entire image to a file.

        :arg file: Output file.
        :type file: path-like or file-like
        """
        write_file(file, self.to_bytes())
class FrameControl:
    """A data class holding fcTL (frame control) info."""
    def __init__(self, width=None, height=None, x_offset=0, y_offset=0,
            delay=100, delay_den=1000, depose_op=1, blend_op=0):
        """Parameters are assigned as object members. See
        `https://wiki.mozilla.org/APNG_Specification
        <https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk>`_
        for the detail of fcTL.
        """
        self.width = width
        self.height = height
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.delay = delay
        self.delay_den = delay_den
        # NOTE: "depose_op" is the spec's dispose_op; the misspelling is
        # kept because it is part of the public API.
        self.depose_op = depose_op
        self.blend_op = blend_op
    def to_bytes(self):
        """Convert to bytes.

        The result is the 22-byte fcTL chunk payload, excluding the
        sequence number and CRC.

        :rtype: bytes
        """
        return struct.pack(
            "!IIIIHHbb", self.width, self.height, self.x_offset, self.y_offset,
            self.delay, self.delay_den, self.depose_op, self.blend_op
        )
    @classmethod
    def from_bytes(cls, b):
        """Construct fcTL info from bytes.

        :arg bytes b: The length of ``b`` must be *22* (four uint32, two
            uint16, two bytes), excluding sequence number and CRC.
        """
        return cls(*struct.unpack("!IIIIHHbb", b))
class APNG:
    """Represent an APNG image."""
    def __init__(self, num_plays=0):
        """An :class:`APNG` is composed by multiple :class:`PNG` s and
        :class:`FrameControl`, which can be inserted with :meth:`append`.

        :arg int num_plays: Number of times to loop. 0 = infinite.

        :var frames: The frames of APNG.
        :vartype frames: list[tuple(PNG, FrameControl)]
        :var int num_plays: same as ``num_plays``.
        """
        self.frames = []
        self.num_plays = num_plays
    def append(self, png, **options):
        """Append one frame.

        :arg PNG png: Append a :class:`PNG` as a frame.
        :arg dict options: The options for :class:`FrameControl`.
        """
        if not isinstance(png, PNG):
            raise TypeError("Expect an instance of `PNG` but got `{}`".format(png))
        control = FrameControl(**options)
        # a frame defaults to covering the whole image
        if control.width is None:
            control.width = png.width
        if control.height is None:
            control.height = png.height
        self.frames.append((png, control))
    def append_file(self, file, **options):
        """Create a PNG from file and append the PNG as a frame.

        :arg file: Input file.
        :type file: path-like or file-like.
        :arg dict options: The options for :class:`FrameControl`.
        """
        self.append(PNG.open_any(file), **options)
    def to_bytes(self):
        """Convert the entire image to bytes.

        :rtype: bytes
        """
        # grab the chunks we needs
        out = [PNG_SIGN]
        # FIXME: it's tricky to define "other_chunks". HoneyView stop the
        # animation if it sees chunks other than fctl or idat, so we put other
        # chunks to the end of the file
        other_chunks = []
        # sequence number shared by fcTL and fdAT chunks
        seq = 0
        # for first frame
        png, control = self.frames[0]
        # header
        out.append(png.hdr)
        # acTL
        out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays)))
        # fcTL
        if control:
            out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()))
            seq += 1
        # and others...
        idat_chunks = []
        for type_, data in png.chunks:
            if type_ in ("IHDR", "IEND"):
                continue
            if type_ == "IDAT":
                # put at last
                idat_chunks.append(data)
                continue
            out.append(data)
        out.extend(idat_chunks)
        # FIXME: we should do some optimization to frames...
        # for other frames
        for png, control in self.frames[1:]:
            # fcTL
            out.append(
                make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())
            )
            seq += 1
            # and others...
            for type_, data in png.chunks:
                if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT:
                    continue
                elif type_ == "IDAT":
                    # convert IDAT to fdAT
                    # fdAT data = 4-byte sequence number + the IDAT data;
                    # data[8:-4] strips the length/type header and the CRC
                    out.append(
                        make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4])
                    )
                    seq += 1
                else:
                    other_chunks.append(data)
        # end
        out.extend(other_chunks)
        out.append(png.end)
        return b"".join(out)
    @classmethod
    def from_files(cls, files, **options):
        """Create an APNG from multiple files.

        This is a shortcut of::

            im = APNG()
            for file in files:
                im.append_file(file, **options)

        :arg list files: A list of filename. See :meth:`PNG.open`.
        :arg dict options: Options for :class:`FrameControl`.
        :rtype: APNG
        """
        im = cls()
        for file in files:
            im.append_file(file, **options)
        return im
    @classmethod
    def from_bytes(cls, b):
        """Create an APNG from raw bytes.

        :arg bytes b: The raw bytes of the APNG file.
        :rtype: APNG
        """
        hdr = None
        # chunks that must precede IDAT; replayed into every frame
        head_chunks = []
        end = ("IEND", make_chunk("IEND", b""))
        # chunks collected for the frame currently being assembled
        frame_chunks = []
        frames = []
        num_plays = 0
        frame_has_head_chunks = False
        control = None
        for type_, data in parse_chunks(b):
            if type_ == "IHDR":
                hdr = data
                frame_chunks.append((type_, data))
            elif type_ == "acTL":
                # animation control: data[8:-4] strips len/type and CRC
                _num_frames, num_plays = struct.unpack("!II", data[8:-4])
                continue
            elif type_ == "fcTL":
                if any(type_ == "IDAT" for type_, data in frame_chunks):
                    # IDAT inside chunk, go to next frame
                    frame_chunks.append(end)
                    frames.append((PNG.from_chunks(frame_chunks), control))

                    frame_has_head_chunks = False
                    control = FrameControl.from_bytes(data[12:-4])
                    # https://github.com/PyCQA/pylint/issues/2072
                    # pylint: disable=typecheck
                    # rebuild IHDR with this frame's size, reusing the
                    # remaining IHDR fields from the previous header
                    hdr = make_chunk("IHDR", struct.pack("!II", control.width, control.height) + hdr[16:-4])
                    frame_chunks = [("IHDR", hdr)]
                else:
                    control = FrameControl.from_bytes(data[12:-4])
            elif type_ == "IDAT":
                if not frame_has_head_chunks:
                    frame_chunks.extend(head_chunks)
                    frame_has_head_chunks = True
                frame_chunks.append((type_, data))
            elif type_ == "fdAT":
                # convert to IDAT
                # (data[12:-4] also strips the 4-byte sequence number)
                if not frame_has_head_chunks:
                    frame_chunks.extend(head_chunks)
                    frame_has_head_chunks = True
                frame_chunks.append(("IDAT", make_chunk("IDAT", data[12:-4])))
            elif type_ == "IEND":
                # end
                frame_chunks.append(end)
                frames.append((PNG.from_chunks(frame_chunks), control))
                break
            elif type_ in CHUNK_BEFORE_IDAT:
                head_chunks.append((type_, data))
            else:
                frame_chunks.append((type_, data))
        o = cls()
        o.frames = frames
        o.num_plays = num_plays
        return o
    @classmethod
    def open(cls, file):
        """Open an APNG file.

        :arg file: Input file.
        :type file: path-like or file-like.
        :rtype: APNG
        """
        return cls.from_bytes(read_file(file))
    def save(self, file):
        """Save the entire image to a file.

        :arg file: Output file.
        :type file: path-like or file-like
        """
        write_file(file, self.to_bytes())
|
eight04/pyAPNG
|
apng/__init__.py
|
make_text_chunk
|
python
|
def make_text_chunk(
type="tEXt", key="Comment", value="",
compression_flag=0, compression_method=0, lang="", translated_key=""):
# pylint: disable=redefined-builtin
if type == "tEXt":
data = key.encode("latin-1") + b"\0" + value.encode("latin-1")
elif type == "zTXt":
data = (
key.encode("latin-1") + struct.pack("!xb", compression_method) +
zlib.compress(value.encode("latin-1"))
)
elif type == "iTXt":
data = (
key.encode("latin-1") +
struct.pack("!xbb", compression_flag, compression_method) +
lang.encode("latin-1") + b"\0" +
translated_key.encode("utf-8") + b"\0"
)
if compression_flag:
data += zlib.compress(value.encode("utf-8"))
else:
data += value.encode("utf-8")
else:
raise TypeError("unknown type {!r}".format(type))
return Chunk(type, make_chunk(type, data))
|
Create a text chunk with a key-value pair.
See https://www.w3.org/TR/PNG/#11textinfo for text chunk information.
Usage:
.. code:: python
from apng import APNG, make_text_chunk
im = APNG.open("file.png")
png, control = im.frames[0]
png.chunks.append(make_text_chunk("tEXt", "Comment", "some text"))
im.save("file.png")
:arg str type: Text chunk type: "tEXt", "zTXt", or "iTXt":
tEXt uses Latin-1 characters.
zTXt uses Latin-1 characters, compressed with zlib.
iTXt uses UTF-8 characters.
:arg str key: The key string, 1-79 characters.
:arg str value: The text value. It would be encoded into
:class:`bytes` and compressed if needed.
:arg int compression_flag: The compression flag for iTXt.
:arg int compression_method: The compression method for zTXt and iTXt.
:arg str lang: The language tag for iTXt.
:arg str translated_key: The translated keyword for iTXt.
:rtype: Chunk
|
train
|
https://github.com/eight04/pyAPNG/blob/b4d2927f7892a1de967b5cf57d434ed65f6a017e/apng/__init__.py#L56-L111
|
[
"def make_chunk(chunk_type, chunk_data):\n\t\"\"\"Create a raw chunk by composing chunk type and data. It\n\tcalculates chunk length and CRC for you.\n\n\t:arg str chunk_type: PNG chunk type.\n\t:arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.\n\t:rtype: bytes\n\t\"\"\"\n\tout = struct.pack(\"!I\", len(chunk_data))\n\tchunk_data = chunk_type.encode(\"latin-1\") + chunk_data\n\tout += chunk_data + struct.pack(\"!I\", binascii.crc32(chunk_data) & 0xffffffff)\n\treturn out\n"
] |
#! python3
"""This is an APNG module, which can create APNG files from PNGs.

Reference:
http://littlesvr.ca/apng/
http://wiki.mozilla.org/APNG_Specification
https://www.w3.org/TR/PNG/
"""
import struct
import binascii
import io
import zlib
from collections import namedtuple
__version__ = "0.3.3"
# The fixed 8-byte signature that every PNG file starts with.
PNG_SIGN = b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"
# Chunk types that must appear before IDAT. When parsing an APNG these are
# collected once and replayed into every extracted frame.
# http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.Summary-of-standard-chunks
CHUNK_BEFORE_IDAT = {
    "cHRM", "gAMA", "iCCP", "sBIT", "sRGB", "bKGD", "hIST", "tRNS", "pHYs",
    "sPLT", "tIME", "PLTE"
}
def parse_chunks(b):
    """Split raw PNG bytes into chunks.

    :arg bytes b: The raw bytes of the PNG file.
    :return: A generator yielding :class:`Chunk`.
    :rtype: Iterator[Chunk]
    """
    pos = 8  # skip the 8-byte PNG signature
    total = len(b)
    while pos < total:
        (length,) = struct.unpack("!I", b[pos:pos + 4])
        chunk_type = b[pos + 4:pos + 8].decode("latin-1")
        # a full chunk is 4 length + 4 type + data + 4 CRC bytes
        chunk_end = pos + length + 12
        yield Chunk(chunk_type, b[pos:chunk_end])
        pos = chunk_end
def make_chunk(chunk_type, chunk_data):
    """Compose a raw chunk from its type and data, filling in the
    length and CRC fields for you.

    :arg str chunk_type: PNG chunk type.
    :arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.
    :rtype: bytes
    """
    body = chunk_type.encode("latin-1") + chunk_data
    # CRC covers the type and data fields, masked to an unsigned 32-bit value
    crc = binascii.crc32(body) & 0xffffffff
    return struct.pack("!I", len(chunk_data)) + body + struct.pack("!I", crc)
def make_text_chunk(
        type="tEXt", key="Comment", value="",
        compression_flag=0, compression_method=0, lang="", translated_key=""):
    """Create a text chunk with a key value pair.

    See https://www.w3.org/TR/PNG/#11textinfo for text chunk information.

    Usage:

    .. code:: python

        from apng import APNG, make_text_chunk
        im = APNG.open("file.png")
        png, control = im.frames[0]
        png.chunks.append(make_text_chunk("tEXt", "Comment", "some text"))
        im.save("file.png")

    :arg str type: Text chunk type: "tEXt", "zTXt", or "iTXt". tEXt uses
        Latin-1 characters; zTXt uses Latin-1 characters compressed with
        zlib; iTXt uses UTF-8 characters.
    :arg str key: The key string, 1-79 characters.
    :arg str value: The text value. It would be encoded into
        :class:`bytes` and compressed if needed.
    :arg int compression_flag: The compression flag for iTXt.
    :arg int compression_method: The compression method for zTXt and iTXt.
    :arg str lang: The language tag for iTXt.
    :arg str translated_key: The translated keyword for iTXt.
    :rtype: Chunk
    """
    # pylint: disable=redefined-builtin
    keyword = key.encode("latin-1")
    if type == "tEXt":
        payload = keyword + b"\0" + value.encode("latin-1")
    elif type == "zTXt":
        # null separator + compression method byte, then compressed text
        payload = (
            keyword + struct.pack("!xb", compression_method)
            + zlib.compress(value.encode("latin-1"))
        )
    elif type == "iTXt":
        text = value.encode("utf-8")
        if compression_flag:
            text = zlib.compress(text)
        payload = (
            keyword
            + struct.pack("!xbb", compression_flag, compression_method)
            + lang.encode("latin-1") + b"\0"
            + translated_key.encode("utf-8") + b"\0"
            + text
        )
    else:
        raise TypeError("unknown type {!r}".format(type))
    return Chunk(type, make_chunk(type, payload))
def read_file(file):
    """Read the whole content of ``file`` as :class:`bytes`.

    :arg file type: path-like or file-like
    :rtype: bytes
    """
    reader = getattr(file, "read", None)
    if reader is not None:
        return reader()
    reader = getattr(file, "read_bytes", None)
    if reader is not None:
        return reader()
    with open(file, "rb") as fp:
        return fp.read()
def write_file(file, b):
    """Write the bytes ``b`` out to ``file``.

    :arg file type: path-like or file-like object.
    :arg bytes b: The content.
    """
    writer = getattr(file, "write_bytes", None)
    if writer is not None:
        writer(b)
        return
    writer = getattr(file, "write", None)
    if writer is not None:
        writer(b)
        return
    with open(file, "wb") as fp:
        fp.write(b)
def open_file(file, mode):
    """Return a file object for ``file``.

    File-like inputs are returned unchanged; path-like inputs are opened
    with the given ``mode``.

    :arg file: file-like or path-like object.
    :arg str mode: ``mode`` argument for :func:`open`.
    """
    if hasattr(file, "read"):
        return file
    opener = getattr(file, "open", None)
    if opener is not None:
        return opener(mode)
    return open(file, mode)
def file_to_png(fp):
    """Convert an image to PNG format with Pillow.

    :arg file-like fp: The image file.
    :rtype: bytes
    """
    import PIL.Image  # pylint: disable=import-error
    with io.BytesIO() as out:
        image = PIL.Image.open(fp)
        image.save(out, "PNG", optimize=True)
        return out.getvalue()
class Chunk(namedtuple("Chunk", ["type", "data"])):
    """A named tuple describing one PNG chunk.

    :arg str type: The chunk type (e.g. ``"IHDR"``).
    :arg bytes data: The raw bytes of the chunk, including the length,
        type, data, and CRC fields.
    """
class PNG:
    """Represent a single PNG image as a list of raw chunks."""

    def __init__(self):
        self.hdr = None
        self.end = None
        self.width = None
        self.height = None
        self.chunks = []
        """A list of :class:`Chunk`. After reading a PNG file, the bytes
        are parsed into multiple chunks. You can remove/add chunks into
        this array before calling :func:`to_bytes`."""

    def init(self):
        """Scan :attr:`chunks` and cache the IHDR/IEND chunks plus the
        image dimensions."""
        for chunk_type, raw in self.chunks:
            if chunk_type == "IHDR":
                self.hdr = raw
            elif chunk_type == "IEND":
                self.end = raw
        if self.hdr:
            # width/height are the first 8 data bytes of IHDR
            # (after the 4-byte length and 4-byte type fields)
            self.width, self.height = struct.unpack("!II", self.hdr[8:16])

    @classmethod
    def open(cls, file):
        """Open a PNG file.

        :arg file: Input file.
        :type file: path-like or file-like
        :rtype: :class:`PNG`
        """
        return cls.from_bytes(read_file(file))

    @classmethod
    def open_any(cls, file):
        """Open any image file. Non-PNG input is converted to PNG with
        the Pillow module; :class:`ImportError` is raised if Pillow is
        not installed.

        :arg file: Input file.
        :type file: path-like or file-like
        :rtype: :class:`PNG`
        """
        with open_file(file, "rb") as f:
            signature = f.read(8)
            f.seek(0)
            if signature == PNG_SIGN:
                raw = f.read()
            else:
                raw = file_to_png(f)
        return cls.from_bytes(raw)

    @classmethod
    def from_bytes(cls, b):
        """Create :class:`PNG` from raw bytes.

        :arg bytes b: The raw bytes of the PNG file.
        :rtype: :class:`PNG`
        """
        im = cls()
        im.chunks = list(parse_chunks(b))
        im.init()
        return im

    @classmethod
    def from_chunks(cls, chunks):
        """Construct PNG from raw chunks.

        :arg chunks: A list of ``(chunk_type, chunk_raw_data)``. Also see
            :attr:`chunks`.
        :type chunks: list[tuple(str, bytes)]
        """
        im = cls()
        im.chunks = chunks
        im.init()
        return im

    def to_bytes(self):
        """Convert the entire image to bytes.

        :rtype: bytes
        """
        return PNG_SIGN + b"".join(raw for _, raw in self.chunks)

    def save(self, file):
        """Save the entire image to a file.

        :arg file: Output file.
        :type file: path-like or file-like
        """
        write_file(file, self.to_bytes())
class FrameControl:
    """A data class holding fcTL (frame control) info.

    fcTL payload layout (excluding the leading sequence number):
    width, height, x_offset, y_offset as uint32; delay numerator and
    denominator as uint16; dispose_op and blend_op as unsigned bytes.
    """
    def __init__(self, width=None, height=None, x_offset=0, y_offset=0,
            delay=100, delay_den=1000, depose_op=1, blend_op=0):
        """Parameters are assigned as object members. See
        `https://wiki.mozilla.org/APNG_Specification
        <https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk>`_
        for the detail of fcTL.
        """
        self.width = width
        self.height = height
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.delay = delay
        self.delay_den = delay_den
        # NOTE: "depose_op" is the spec's dispose_op; the misspelling is
        # kept because it is part of the public API.
        self.depose_op = depose_op
        self.blend_op = blend_op
    def to_bytes(self):
        """Convert to the 22-byte fcTL payload (no sequence number, no CRC).

        :rtype: bytes
        """
        # "B" (unsigned byte) for the op fields: the APNG spec defines
        # dispose_op/blend_op as unsigned; the previous signed "b" format
        # mis-handled values >= 0x80.
        return struct.pack(
            "!IIIIHHBB", self.width, self.height, self.x_offset, self.y_offset,
            self.delay, self.delay_den, self.depose_op, self.blend_op
        )
    @classmethod
    def from_bytes(cls, b):
        """Construct fcTL info from bytes.

        :arg bytes b: Must be 22 bytes long, i.e. the fcTL chunk data
            excluding the sequence number and CRC.
        """
        return cls(*struct.unpack("!IIIIHHBB", b))
class APNG:
    """Represent an APNG image."""
    def __init__(self, num_plays=0):
        """An :class:`APNG` is composed by multiple :class:`PNG` s and
        :class:`FrameControl`, which can be inserted with :meth:`append`.

        :arg int num_plays: Number of times to loop. 0 = infinite.

        :var frames: The frames of APNG.
        :vartype frames: list[tuple(PNG, FrameControl)]
        :var int num_plays: same as ``num_plays``.
        """
        self.frames = []
        self.num_plays = num_plays
    def append(self, png, **options):
        """Append one frame.

        :arg PNG png: Append a :class:`PNG` as a frame.
        :arg dict options: The options for :class:`FrameControl`.
        """
        if not isinstance(png, PNG):
            raise TypeError("Expect an instance of `PNG` but got `{}`".format(png))
        control = FrameControl(**options)
        # a frame defaults to covering the whole image
        if control.width is None:
            control.width = png.width
        if control.height is None:
            control.height = png.height
        self.frames.append((png, control))
    def append_file(self, file, **options):
        """Create a PNG from file and append the PNG as a frame.

        :arg file: Input file.
        :type file: path-like or file-like.
        :arg dict options: The options for :class:`FrameControl`.
        """
        self.append(PNG.open_any(file), **options)
    def to_bytes(self):
        """Convert the entire image to bytes.

        :rtype: bytes
        """
        # grab the chunks we needs
        out = [PNG_SIGN]
        # FIXME: it's tricky to define "other_chunks". HoneyView stop the
        # animation if it sees chunks other than fctl or idat, so we put other
        # chunks to the end of the file
        other_chunks = []
        # sequence number shared by fcTL and fdAT chunks
        seq = 0
        # for first frame
        png, control = self.frames[0]
        # header
        out.append(png.hdr)
        # acTL
        out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays)))
        # fcTL
        if control:
            out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()))
            seq += 1
        # and others...
        idat_chunks = []
        for type_, data in png.chunks:
            if type_ in ("IHDR", "IEND"):
                continue
            if type_ == "IDAT":
                # put at last
                idat_chunks.append(data)
                continue
            out.append(data)
        out.extend(idat_chunks)
        # FIXME: we should do some optimization to frames...
        # for other frames
        for png, control in self.frames[1:]:
            # fcTL
            out.append(
                make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())
            )
            seq += 1
            # and others...
            for type_, data in png.chunks:
                if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT:
                    continue
                elif type_ == "IDAT":
                    # convert IDAT to fdAT
                    # fdAT data = 4-byte sequence number + the IDAT data;
                    # data[8:-4] strips the length/type header and the CRC
                    out.append(
                        make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4])
                    )
                    seq += 1
                else:
                    other_chunks.append(data)
        # end
        out.extend(other_chunks)
        out.append(png.end)
        return b"".join(out)
    @classmethod
    def from_files(cls, files, **options):
        """Create an APNG from multiple files.

        This is a shortcut of::

            im = APNG()
            for file in files:
                im.append_file(file, **options)

        :arg list files: A list of filename. See :meth:`PNG.open`.
        :arg dict options: Options for :class:`FrameControl`.
        :rtype: APNG
        """
        im = cls()
        for file in files:
            im.append_file(file, **options)
        return im
    @classmethod
    def from_bytes(cls, b):
        """Create an APNG from raw bytes.

        :arg bytes b: The raw bytes of the APNG file.
        :rtype: APNG
        """
        hdr = None
        # chunks that must precede IDAT; replayed into every frame
        head_chunks = []
        end = ("IEND", make_chunk("IEND", b""))
        # chunks collected for the frame currently being assembled
        frame_chunks = []
        frames = []
        num_plays = 0
        frame_has_head_chunks = False
        control = None
        for type_, data in parse_chunks(b):
            if type_ == "IHDR":
                hdr = data
                frame_chunks.append((type_, data))
            elif type_ == "acTL":
                # animation control: data[8:-4] strips len/type and CRC
                _num_frames, num_plays = struct.unpack("!II", data[8:-4])
                continue
            elif type_ == "fcTL":
                if any(type_ == "IDAT" for type_, data in frame_chunks):
                    # IDAT inside chunk, go to next frame
                    frame_chunks.append(end)
                    frames.append((PNG.from_chunks(frame_chunks), control))

                    frame_has_head_chunks = False
                    control = FrameControl.from_bytes(data[12:-4])
                    # https://github.com/PyCQA/pylint/issues/2072
                    # pylint: disable=typecheck
                    # rebuild IHDR with this frame's size, reusing the
                    # remaining IHDR fields from the previous header
                    hdr = make_chunk("IHDR", struct.pack("!II", control.width, control.height) + hdr[16:-4])
                    frame_chunks = [("IHDR", hdr)]
                else:
                    control = FrameControl.from_bytes(data[12:-4])
            elif type_ == "IDAT":
                if not frame_has_head_chunks:
                    frame_chunks.extend(head_chunks)
                    frame_has_head_chunks = True
                frame_chunks.append((type_, data))
            elif type_ == "fdAT":
                # convert to IDAT
                # (data[12:-4] also strips the 4-byte sequence number)
                if not frame_has_head_chunks:
                    frame_chunks.extend(head_chunks)
                    frame_has_head_chunks = True
                frame_chunks.append(("IDAT", make_chunk("IDAT", data[12:-4])))
            elif type_ == "IEND":
                # end
                frame_chunks.append(end)
                frames.append((PNG.from_chunks(frame_chunks), control))
                break
            elif type_ in CHUNK_BEFORE_IDAT:
                head_chunks.append((type_, data))
            else:
                frame_chunks.append((type_, data))
        o = cls()
        o.frames = frames
        o.num_plays = num_plays
        return o
    @classmethod
    def open(cls, file):
        """Open an APNG file.

        :arg file: Input file.
        :type file: path-like or file-like.
        :rtype: APNG
        """
        return cls.from_bytes(read_file(file))
    def save(self, file):
        """Save the entire image to a file.

        :arg file: Output file.
        :type file: path-like or file-like
        """
        write_file(file, self.to_bytes())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.