repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
fengbaicanhe/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/pytree.py | 325 | 29039 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
import warnings
from StringIO import StringIO
HUGE = 0x7FFFFFFF  # maximum repeat count, default max

_type_reprs = {}  # lazily-built cache: grammar type number -> symbolic name


def type_repr(type_num):
    """Map a numeric grammar type to its symbolic name, if one exists.

    The name table is built lazily on first use from pygram's symbol
    definitions.  Numbers with no known name are returned unchanged,
    so the result is always printable.
    """
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        for name, val in python_symbols.__dict__.items():
            if type(val) == int:
                _type_reprs[val] = name
    return _type_reprs.setdefault(type_num, type_num)
class Base(object):
    """Abstract base class shared by Node and Leaf.

    Supplies the boilerplate (equality, tree navigation, mutation
    helpers) via the template pattern; concrete subclasses implement
    _eq(), clone(), post_order() and pre_order().

    A node may be a subnode of at most one parent.
    """

    # Default values for instance variables
    type = None           # int: token number (< 256) or symbol number (>= 256)
    parent = None         # Parent node pointer, or None
    children = ()         # Tuple of subnodes
    was_changed = False
    was_checked = False

    def __new__(cls, *args, **kwds):
        """Refuse to instantiate the abstract base directly."""
        assert cls is not Base, "Cannot instantiate Base"
        return object.__new__(cls)

    def __eq__(self, other):
        """Equality: requires the same concrete class, then defers to _eq()."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)

    __hash__ = None  # For Py3 compatibility.

    def __ne__(self, other):
        """Inequality: requires the same concrete class, then negates _eq()."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)

    def _eq(self, other):
        """Structural equality test; *other* is guaranteed to be the same
        concrete class.  Prefix and other context are ignored.  Must be
        implemented by the concrete subclass."""
        raise NotImplementedError

    def clone(self):
        """Return a cloned (deep) copy of self.  Must be implemented by
        the concrete subclass."""
        raise NotImplementedError

    def post_order(self):
        """Return a post-order iterator for the tree.  Must be implemented
        by the concrete subclass."""
        raise NotImplementedError

    def pre_order(self):
        """Return a pre-order iterator for the tree.  Must be implemented
        by the concrete subclass."""
        raise NotImplementedError

    def set_prefix(self, prefix):
        """DEPRECATED: assign the prefix property instead."""
        warnings.warn("set_prefix() is deprecated; use the prefix property",
                      DeprecationWarning, stacklevel=2)
        self.prefix = prefix

    def get_prefix(self):
        """DEPRECATED: read the prefix property instead."""
        warnings.warn("get_prefix() is deprecated; use the prefix property",
                      DeprecationWarning, stacklevel=2)
        return self.prefix

    def replace(self, new):
        """Splice *new* (a node or a list of nodes) into the parent's
        children in place of self, and detach self from the tree."""
        assert self.parent is not None, str(self)
        assert new is not None
        if not isinstance(new, list):
            new = [new]
        updated = []
        seen = False
        for sibling in self.parent.children:
            if sibling is self:
                assert not seen, (self.parent.children, self, new)
                if new is not None:
                    updated.extend(new)
                seen = True
            else:
                updated.append(sibling)
        assert seen, (self.children, self, new)
        self.parent.changed()
        self.parent.children = updated
        for replacement in new:
            replacement.parent = self.parent
        self.parent = None

    def get_lineno(self):
        """Line number of the first leaf under this node, or None when
        the subtree contains no leaves."""
        node = self
        while not isinstance(node, Leaf):
            if not node.children:
                return
            node = node.children[0]
        return node.lineno

    def changed(self):
        # Propagate the "modified" flag up to the root.
        if self.parent:
            self.parent.changed()
        self.was_changed = True

    def remove(self):
        """Detach self from its parent and return the index it occupied
        in the parent's children (None when there was no parent)."""
        if self.parent:
            for index, sibling in enumerate(self.parent.children):
                if sibling is self:
                    self.parent.changed()
                    del self.parent.children[index]
                    self.parent = None
                    return index

    @property
    def next_sibling(self):
        """The node immediately following self in the parent's children,
        or None when self is last (or parentless)."""
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for index, sibling in enumerate(self.parent.children):
            if sibling is self:
                try:
                    return self.parent.children[index + 1]
                except IndexError:
                    return None

    @property
    def prev_sibling(self):
        """The node immediately preceding self in the parent's children,
        or None when self is first (or parentless)."""
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for index, sibling in enumerate(self.parent.children):
            if sibling is self:
                if index == 0:
                    return None
                return self.parent.children[index - 1]

    def leaves(self):
        """Yield every leaf beneath this node, left to right."""
        for subnode in self.children:
            for leaf in subnode.leaves():
                yield leaf

    def depth(self):
        """Number of ancestors between self and the tree root."""
        if self.parent is None:
            return 0
        return 1 + self.parent.depth()

    def get_suffix(self):
        """The string immediately following this node, i.e. the next
        sibling's prefix (empty string when there is no next sibling)."""
        successor = self.next_sibling
        if successor is None:
            return u""
        return successor.prefix

    if sys.version_info < (3, 0):
        def __str__(self):
            return unicode(self).encode("ascii")
class Node(Base):
    """Concrete implementation for interior (non-leaf) nodes."""

    def __init__(self, type, children,
                 context=None,
                 prefix=None,
                 fixers_applied=None):
        """Build an interior node.

        *type* must be a symbol number (>= 256); *children* is a
        sequence of nodes whose parent pointers are re-targeted at self
        as a side effect.  An explicit *prefix*, when given, overrides
        the first child's prefix.
        """
        assert type >= 256, type
        self.type = type
        self.children = list(children)
        for ch in self.children:
            assert ch.parent is None, repr(ch)
            ch.parent = self
        if prefix is not None:
            self.prefix = prefix
        if fixers_applied:
            self.fixers_applied = fixers_applied[:]
        else:
            self.fixers_applied = None

    def __repr__(self):
        """Canonical debugging representation."""
        return "%s(%s, %r)" % (self.__class__.__name__,
                               type_repr(self.type),
                               self.children)

    def __unicode__(self):
        """Pretty form: the concatenation of the children, which
        reproduces the input source exactly."""
        return u"".join(map(unicode, self.children))

    if sys.version_info > (3, 0):
        __str__ = __unicode__

    def _eq(self, other):
        """Nodes are equal when both type and children match."""
        return (self.type, self.children) == (other.type, other.children)

    def clone(self):
        """Deep copy: every child is cloned into a fresh Node."""
        return Node(self.type, [ch.clone() for ch in self.children],
                    fixers_applied=self.fixers_applied)

    def post_order(self):
        """Yield all descendants bottom-up, then self."""
        for child in self.children:
            for descendant in child.post_order():
                yield descendant
        yield self

    def pre_order(self):
        """Yield self, then all descendants top-down."""
        yield self
        for child in self.children:
            for descendant in child.pre_order():
                yield descendant

    def _prefix_getter(self):
        """Whitespace/comments preceding this node in the input
        (delegated to the first child; empty when childless)."""
        if not self.children:
            return ""
        return self.children[0].prefix

    def _prefix_setter(self, prefix):
        # A childless node silently drops the assignment.
        if self.children:
            self.children[0].prefix = prefix

    prefix = property(_prefix_getter, _prefix_setter)

    def set_child(self, i, child):
        """children[i] = child, keeping parent pointers consistent."""
        child.parent = self
        self.children[i].parent = None
        self.children[i] = child
        self.changed()

    def insert_child(self, i, child):
        """children.insert(i, child), keeping parent pointers consistent."""
        child.parent = self
        self.children.insert(i, child)
        self.changed()

    def append_child(self, child):
        """children.append(child), keeping parent pointers consistent."""
        child.parent = self
        self.children.append(child)
        self.changed()
class Leaf(Base):
    """Concrete implementation for leaf (token) nodes."""

    # Default values for instance variables
    _prefix = ""  # Whitespace and comments preceding this token in the input
    lineno = 0    # Line where this token starts in the input
    column = 0    # Column where this token starts in the input

    def __init__(self, type, value,
                 context=None,
                 prefix=None,
                 fixers_applied=None):
        """Build a leaf.

        *type* must be a token number (< 256); *value* is the token
        text.  *context*, when given, is a (prefix, (lineno, column))
        tuple; an explicit *prefix* overrides the one from context.

        Note: fixers_applied previously defaulted to a mutable [] --
        the classic shared-default pitfall.  None is used now; the
        observable behavior is identical because the list was always
        copied, never aliased.
        """
        assert 0 <= type < 256, type
        if context is not None:
            self._prefix, (self.lineno, self.column) = context
        self.type = type
        self.value = value
        if prefix is not None:
            self._prefix = prefix
        self.fixers_applied = fixers_applied[:] if fixers_applied is not None else []

    def __repr__(self):
        """Canonical debugging representation."""
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.type,
                               self.value)

    def __unicode__(self):
        """Pretty form: prefix + token text, reproducing the input
        source exactly."""
        return self.prefix + unicode(self.value)

    if sys.version_info > (3, 0):
        __str__ = __unicode__

    def _eq(self, other):
        """Leaves are equal when both type and value match."""
        return (self.type, self.value) == (other.type, other.value)

    def clone(self):
        """Deep copy carrying over prefix and source position."""
        return Leaf(self.type, self.value,
                    (self.prefix, (self.lineno, self.column)),
                    fixers_applied=self.fixers_applied)

    def leaves(self):
        yield self

    def post_order(self):
        """Post-order iterator: a leaf is its own whole subtree."""
        yield self

    def pre_order(self):
        """Pre-order iterator: a leaf is its own whole subtree."""
        yield self

    def _prefix_getter(self):
        """The whitespace and comments preceding this token in the input."""
        return self._prefix

    def _prefix_setter(self, prefix):
        self.changed()
        self._prefix = prefix

    prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
    """
    Convert raw node information to a Node or Leaf instance.

    This is passed to the parser driver, which calls it whenever a
    reduction of a grammar rule produces a new complete node, so that
    the tree is built strictly bottom-up.
    """
    type, value, context, children = raw_node
    if not children and type not in gr.number2symbol:
        return Leaf(type, value, context=context)
    # Collapse trivial reductions: a single child is returned as-is
    # instead of being wrapped in a fresh Node.
    if len(children) == 1:
        return children[0]
    return Node(type, children, context=context)
class BasePattern(object):
    """
    A pattern is a tree matching pattern.

    It looks for a specific node type (token or symbol) and optionally
    for specific content.  This is an abstract base class; the concrete
    subclasses are:

    - LeafPattern: matches a single leaf node;
    - NodePattern: matches a single (usually interior) node;
    - WildcardPattern: matches a variable-length sequence of nodes.
    """

    # Defaults for instance variables
    type = None     # Node type (token if < 256, symbol if >= 256)
    content = None  # Optional content matching pattern
    name = None     # Optional name used to store match in results dict

    def __new__(cls, *args, **kwds):
        """Refuse to instantiate the abstract base directly."""
        assert cls is not BasePattern, "Cannot instantiate BasePattern"
        return object.__new__(cls)

    def __repr__(self):
        shown = [type_repr(self.type), self.content, self.name]
        # Trim trailing defaults so the repr stays compact.
        while shown and shown[-1] is None:
            shown.pop()
        return "%s(%s)" % (self.__class__.__name__,
                           ", ".join(map(repr, shown)))

    def optimize(self):
        """Hook for subclass optimizations.  Returns an equivalent
        (possibly identical) pattern."""
        return self

    def match(self, node, results=None):
        """Test whether this pattern exactly matches *node*.

        Returns True or False.  When *results* is a dict, it is updated
        on success with the nodes matching named subpatterns.  This is
        the default implementation for non-wildcard patterns.
        """
        if self.type is not None and node.type != self.type:
            return False
        if self.content is not None:
            submatches = None
            if results is not None:
                submatches = {}
            if not self._submatch(node, submatches):
                return False
            if submatches:
                results.update(submatches)
        if results is not None and self.name:
            results[self.name] = node
        return True

    def match_seq(self, nodes, results=None):
        """A non-wildcard pattern matches a sequence only when that
        sequence is exactly one node long."""
        if len(nodes) != 1:
            return False
        return self.match(nodes[0], results)

    def generate_matches(self, nodes):
        """Yield (1, results) when this pattern matches the first node
        of *nodes*; otherwise yield nothing (non-wildcard default)."""
        captured = {}
        if nodes and self.match(nodes[0], captured):
            yield 1, captured
class LeafPattern(BasePattern):
    """Pattern that matches exactly one leaf node."""

    def __init__(self, type=None, content=None, name=None):
        """Set up the pattern from optional type, content, and name.

        *type*, if given, must be a token type (< 256); otherwise any
        leaf node can match (content may still constrain it).
        *content*, if given, must be a string the leaf's value has to
        equal.  *name*, if given, is the key under which a matching
        node is stored in the results dict.
        """
        if type is not None:
            assert 0 <= type < 256, type
        if content is not None:
            assert isinstance(content, basestring), repr(content)
        self.type = type
        self.content = content
        self.name = name

    def match(self, node, results=None):
        """Override match() to insist on a leaf node."""
        if not isinstance(node, Leaf):
            return False
        return BasePattern.match(self, node, results)

    def _submatch(self, node, results=None):
        """Content check: the leaf's value must equal the pattern's
        content string.  Assumes the node type already matched and
        self.content is not None; *results* may be updated even when
        False is returned."""
        return self.content == node.value
class NodePattern(BasePattern):
    """Pattern that matches a single node and, optionally, its children."""

    wildcards = False  # becomes True when any subpattern is a WildcardPattern

    def __init__(self, type=None, content=None, name=None):
        """Set up the pattern from optional type, content, and name.

        *type*, if given, must be a symbol type (>= 256).  When type is
        None any single node matches, unless *content* is given, in
        which case only non-leaf nodes whose children match the content
        patterns do.  *content*, if given, must be a sequence of
        patterns matched against the node's children exactly; then the
        type must not be None.  *name*, if given, is the key under
        which a matching node is stored in the results dict.
        """
        if type is not None:
            assert type >= 256, type
        if content is not None:
            assert not isinstance(content, basestring), repr(content)
            content = list(content)
            for index, subpattern in enumerate(content):
                assert isinstance(subpattern, BasePattern), (index, subpattern)
                if isinstance(subpattern, WildcardPattern):
                    self.wildcards = True
        self.type = type
        self.content = content
        self.name = name

    def _submatch(self, node, results=None):
        """Match the content patterns against the node's children.

        Assumes the node type already matched and self.content is not
        None.  *results* may be updated even when False is returned.
        """
        if self.wildcards:
            # With a wildcard present, delegate to the full sequence
            # matcher and accept only matches covering every child.
            for count, submatches in generate_matches(self.content,
                                                      node.children):
                if count == len(node.children):
                    if results is not None:
                        results.update(submatches)
                    return True
            return False
        if len(self.content) != len(node.children):
            return False
        for subpattern, child in zip(self.content, node.children):
            if not subpattern.match(child, results):
                return False
        return True
class WildcardPattern(BasePattern):
    """
    A wildcard pattern can match zero or more nodes.

    This has all the flexibility needed to implement patterns like:

        .* .+ .? .{m,n}
        (a b c | d e | f)
        (...)* (...)+ (...)? (...){m,n}

    except it always uses non-greedy matching.
    """

    def __init__(self, content=None, min=0, max=HUGE, name=None):
        """
        Initializer.

        Args:
            content: optional sequence of subsequences of patterns;
                if absent, matches one node;
                if present, each subsequence is an alternative [*]
            min: optional minimum number of times to match, default 0
            max: optional maximum number of times to match, default HUGE
            name: optional name assigned to this match

        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
            equivalent to (a b c | d e | f g h); if content is None,
            this is equivalent to '.' in regular expression terms.
            The min and max parameters work as follows:
                min=0, max=maxint: .*
                min=1, max=maxint: .+
                min=0, max=1: .?
                min=1, max=1: .
            If content is not None, replace the dot with the
            parenthesized list of alternatives, e.g. (a b c | d e | f g h)*
        """
        assert 0 <= min <= max <= HUGE, (min, max)
        if content is not None:
            content = tuple(map(tuple, content))  # Protect against alterations
            # Check sanity of alternatives
            assert len(content), repr(content)  # Can't have zero alternatives
            for alt in content:
                # Fixed comment: the assert requires each alternative
                # to be NON-empty (the old comment said the opposite).
                assert len(alt), repr(alt)  # Can't have empty alternatives
        self.content = content
        self.min = min
        self.max = max
        self.name = name

    def optimize(self):
        """Optimize certain stacked wildcard patterns."""
        subpattern = None
        if (self.content is not None and
                len(self.content) == 1 and len(self.content[0]) == 1):
            subpattern = self.content[0][0]
        if self.min == 1 and self.max == 1:
            if self.content is None:
                return NodePattern(name=self.name)
            if subpattern is not None and self.name == subpattern.name:
                return subpattern.optimize()
        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
                subpattern.min <= 1 and self.name == subpattern.name):
            # Collapse nested wildcards into a single equivalent pattern.
            return WildcardPattern(subpattern.content,
                                   self.min * subpattern.min,
                                   self.max * subpattern.max,
                                   subpattern.name)
        return self

    def match(self, node, results=None):
        """Does this pattern exactly match a node?"""
        return self.match_seq([node], results)

    def match_seq(self, nodes, results=None):
        """Does this pattern exactly match a sequence of nodes?"""
        for c, r in self.generate_matches(nodes):
            if c == len(nodes):
                if results is not None:
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False

    def generate_matches(self, nodes):
        """
        Generator yielding matches for a sequence of nodes.

        Args:
            nodes: sequence of nodes

        Yields:
            (count, results) tuples where:
            count: the match comprises nodes[:count];
            results: dict containing named submatches.
        """
        if self.content is None:
            # Shortcut for special case (see __init__.__doc__).
            # xrange was replaced by range: for the bounded counts used
            # here the two iterate identically on Python 2, and range
            # also exists on Python 3.
            for count in range(self.min, 1 + min(len(nodes), self.max)):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        elif self.name == "bare_name":
            yield self._bare_name_matches(nodes)
        else:
            # The reason for this is that hitting the recursion limit usually
            # results in some ugly messages about how RuntimeErrors are being
            # ignored. We don't do this on non-CPython implementations because
            # they don't have this problem.
            if hasattr(sys, "getrefcount"):
                save_stderr = sys.stderr
                sys.stderr = StringIO()
            try:
                for count, r in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            except RuntimeError:
                # Fall back to the iterative pattern matching scheme if
                # the recursive scheme hits the recursion limit.
                for count, r in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            finally:
                if hasattr(sys, "getrefcount"):
                    sys.stderr = save_stderr

    def _iterative_matches(self, nodes):
        """Helper to iteratively yield the matches."""
        nodelen = len(nodes)
        if 0 >= self.min:
            yield 0, {}

        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))

        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results

    def _bare_name_matches(self, nodes):
        """Special optimized matcher for bare_name."""
        count = 0
        r = {}
        done = False
        max = len(nodes)
        while not done and count < max:
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        r[self.name] = nodes[:count]
        return count, r

    def _recursive_matches(self, nodes, count):
        """Helper to recursively yield the matches."""
        assert self.content is not None
        if count >= self.min:
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count + 1):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r
class NegatedPattern(BasePattern):
    """Pattern that matches only where its argument pattern does not."""

    def __init__(self, content=None):
        """
        Initializer.

        The argument is either a pattern or None.  If None, this only
        matches an empty sequence (effectively '$' in regex lingo); if
        not None, this matches whenever the argument pattern has no
        matches at all.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content

    def match(self, node):
        # A negated pattern never matches a single node in its entirety.
        return False

    def match_seq(self, nodes):
        # Only the empty sequence of nodes is matched in its entirety.
        return len(nodes) == 0

    def generate_matches(self, nodes):
        if self.content is None:
            # Zero-width match exactly when the sequence is empty.
            if len(nodes) == 0:
                yield 0, {}
        else:
            # Zero-width match iff the wrapped pattern cannot match.
            for c, r in self.content.generate_matches(nodes):
                return
            yield 0, {}
def generate_matches(patterns, nodes):
    """
    Generator yielding matches for a sequence of patterns and nodes.

    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes

    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    if not patterns:
        yield 0, {}
        return
    head, tail = patterns[0], patterns[1:]
    for head_count, head_results in head.generate_matches(nodes):
        if not tail:
            yield head_count, head_results
        else:
            # Recurse on the remaining patterns over the remaining nodes
            # and merge the named submatches of both halves.
            for tail_count, tail_results in generate_matches(
                    tail, nodes[head_count:]):
                combined = {}
                combined.update(head_results)
                combined.update(tail_results)
                yield head_count + tail_count, combined
| apache-2.0 |
pepeantena4040/MiSitioWeb | lib/toaster/orm/migrations/0011_auto__add_field_projectlayer_dirpath.py | 6 | 17973 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ProjectLayer.dirpath'
db.add_column(u'orm_projectlayer', 'dirpath',
self.gf('django.db.models.fields.CharField')(default='', max_length=254),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ProjectLayer.dirpath'
db.delete_column(u'orm_projectlayer', 'dirpath')
models = {
u'orm.build': {
'Meta': {'object_name': 'Build'},
'bitbake_version': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'build_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'completed_on': ('django.db.models.fields.DateTimeField', [], {}),
'cooker_log_path': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'distro': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'distro_version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'errors_no': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'machine': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'outcome': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']", 'null': 'True'}),
'started_on': ('django.db.models.fields.DateTimeField', [], {}),
'timespent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'warnings_no': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'orm.helptext': {
'Meta': {'object_name': 'HelpText'},
'area': ('django.db.models.fields.IntegerField', [], {}),
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'helptext_build'", 'to': u"orm['orm.Build']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'orm.layer': {
'Meta': {'object_name': 'Layer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_index_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'local_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'orm.layer_version': {
'Meta': {'object_name': 'Layer_Version'},
'branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_build'", 'to': u"orm['orm.Build']"}),
'commit': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'layer_version_layer'", 'to': u"orm['orm.Layer']"}),
'priority': ('django.db.models.fields.IntegerField', [], {})
},
u'orm.logmessage': {
'Meta': {'object_name': 'LogMessage'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lineno': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'pathname': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Task']", 'null': 'True', 'blank': 'True'})
},
u'orm.package': {
'Meta': {'object_name': 'Package'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'installed_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Recipe']", 'null': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'orm.package_dependency': {
'Meta': {'object_name': 'Package_Dependency'},
'dep_type': ('django.db.models.fields.IntegerField', [], {}),
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_target'", 'to': u"orm['orm.Package']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_dependencies_source'", 'to': u"orm['orm.Package']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']", 'null': 'True'})
},
u'orm.package_file': {
'Meta': {'object_name': 'Package_File'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildfilelist_package'", 'to': u"orm['orm.Package']"}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
u'orm.project': {
'Meta': {'object_name': 'Project'},
'branch': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'orm.projectlayer': {
'Meta': {'object_name': 'ProjectLayer'},
'commit': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'dirpath': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'giturl': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"})
},
u'orm.projecttarget': {
'Meta': {'object_name': 'ProjectTarget'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'orm.projectvariable': {
'Meta': {'object_name': 'ProjectVariable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Project']"}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'orm.recipe': {
'Meta': {'object_name': 'Recipe'},
'bugtracker': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file_path': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recipe_layer_version'", 'to': u"orm['orm.Layer_Version']"}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'orm.recipe_dependency': {
'Meta': {'object_name': 'Recipe_Dependency'},
'dep_type': ('django.db.models.fields.IntegerField', [], {}),
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_depends'", 'to': u"orm['orm.Recipe']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'r_dependencies_recipe'", 'to': u"orm['orm.Recipe']"})
},
u'orm.target': {
'Meta': {'object_name': 'Target'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Build']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_manifest_path': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'orm.target_file': {
'Meta': {'object_name': 'Target_File'},
'directory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'directory_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inodetype': ('django.db.models.fields.IntegerField', [], {}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}),
'permission': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'sym_target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'symlink_set'", 'null': 'True', 'to': u"orm['orm.Target_File']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
},
u'orm.target_image_file': {
'Meta': {'object_name': 'Target_Image_File'},
'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '254'}),
'file_size': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
},
u'orm.target_installed_package': {
'Meta': {'object_name': 'Target_Installed_Package'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'buildtargetlist_package'", 'to': u"orm['orm.Package']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orm.Target']"})
},
u'orm.task': {
'Meta': {'ordering': "('order', 'recipe')", 'unique_together': "(('build', 'recipe', 'task_name'),)", 'object_name': 'Task'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_build'", 'to': u"orm['orm.Build']"}),
'cpu_usage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}),
'disk_io': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'elapsed_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'logfile': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'outcome': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'path_to_sstate_obj': ('django.db.models.fields.FilePathField', [], {'max_length': '500', 'blank': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'build_recipe'", 'to': u"orm['orm.Recipe']"}),
'script_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'source_url': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'}),
'sstate_checksum': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'sstate_result': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task_executed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'work_directory': ('django.db.models.fields.FilePathField', [], {'max_length': '255', 'blank': 'True'})
},
u'orm.task_dependency': {
'Meta': {'object_name': 'Task_Dependency'},
'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_depends'", 'to': u"orm['orm.Task']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'task_dependencies_task'", 'to': u"orm['orm.Task']"})
},
u'orm.variable': {
'Meta': {'object_name': 'Variable'},
'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variable_build'", 'to': u"orm['orm.Build']"}),
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'human_readable_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variable_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'variable_value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'orm.variablehistory': {
'Meta': {'object_name': 'VariableHistory'},
'file_name': ('django.db.models.fields.FilePathField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'variable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vhistory'", 'to': u"orm['orm.Variable']"})
}
}
complete_apps = ['orm'] | gpl-2.0 |
jabesq/home-assistant | homeassistant/components/totalconnect/alarm_control_panel.py | 1 | 4506 | """Interfaces with TotalConnect alarm control panels."""
import logging
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED, STATE_ALARM_ARMING, STATE_ALARM_DISARMING,
STATE_ALARM_TRIGGERED, STATE_ALARM_ARMED_CUSTOM_BYPASS)
from . import DOMAIN as TOTALCONNECT_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up an alarm control panel for a TotalConnect device."""
    if discovery_info is None:
        return
    client = hass.data[TOTALCONNECT_DOMAIN].client
    # One alarm entity per TotalConnect location known to the client.
    entities = [
        TotalConnectAlarm(
            location.get('LocationName'), location.get('LocationID'), client)
        for location in client.locations
    ]
    add_entities(entities)
class TotalConnectAlarm(alarm.AlarmControlPanel):
    """Representation of a TotalConnect alarm panel for one location."""

    def __init__(self, name, location_id, client):
        """Initialize the TotalConnect status.

        name: the TotalConnect location name (also used as the entity name).
        location_id: the TotalConnect location identifier.
        client: the shared TotalConnect client from the component setup.
        """
        self._name = name
        self._location_id = location_id
        self._client = client
        self._state = None
        self._device_state_attributes = {}

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._device_state_attributes

    def update(self):
        """Poll the TotalConnect client and map its armed status code.

        Unknown status codes are logged and leave the state as None.
        """
        status = self._client.get_armed_status(self._name)
        attr = {
            'location_name': self._name,
            'location_id': self._location_id,
            'ac_loss': self._client.ac_loss,
            'low_battery': self._client.low_battery,
            'triggered_source': None,
            'triggered_zone': None
        }
        if status == self._client.DISARMED:
            state = STATE_ALARM_DISARMED
        elif status == self._client.DISARMED_BYPASS:
            state = STATE_ALARM_DISARMED
        elif status == self._client.ARMED_STAY:
            state = STATE_ALARM_ARMED_HOME
        elif status == self._client.ARMED_STAY_INSTANT:
            state = STATE_ALARM_ARMED_HOME
        elif status == self._client.ARMED_STAY_INSTANT_BYPASS:
            state = STATE_ALARM_ARMED_HOME
        elif status == self._client.ARMED_STAY_NIGHT:
            state = STATE_ALARM_ARMED_NIGHT
        elif status == self._client.ARMED_AWAY:
            state = STATE_ALARM_ARMED_AWAY
        elif status == self._client.ARMED_AWAY_BYPASS:
            state = STATE_ALARM_ARMED_AWAY
        elif status == self._client.ARMED_AWAY_INSTANT:
            state = STATE_ALARM_ARMED_AWAY
        elif status == self._client.ARMED_AWAY_INSTANT_BYPASS:
            state = STATE_ALARM_ARMED_AWAY
        elif status == self._client.ARMED_CUSTOM_BYPASS:
            state = STATE_ALARM_ARMED_CUSTOM_BYPASS
        elif status == self._client.ARMING:
            state = STATE_ALARM_ARMING
        elif status == self._client.DISARMING:
            state = STATE_ALARM_DISARMING
        elif status == self._client.ALARMING:
            state = STATE_ALARM_TRIGGERED
            attr['triggered_source'] = 'Police/Medical'
        elif status == self._client.ALARMING_FIRE_SMOKE:
            state = STATE_ALARM_TRIGGERED
            attr['triggered_source'] = 'Fire/Smoke'
        elif status == self._client.ALARMING_CARBON_MONOXIDE:
            state = STATE_ALARM_TRIGGERED
            attr['triggered_source'] = 'Carbon Monoxide'
        else:
            # Fix: use the module-level _LOGGER (not the root logger via
            # logging.info) so the message is attributed to this component,
            # and log at warning level since this code is unexpected.
            _LOGGER.warning("Total Connect Client returned unknown "
                            "status code: %s", status)
            state = None
        self._state = state
        self._device_state_attributes = attr

    def alarm_disarm(self, code=None):
        """Send disarm command."""
        self._client.disarm(self._name)

    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        self._client.arm_stay(self._name)

    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        self._client.arm_away(self._name)

    def alarm_arm_night(self, code=None):
        """Send arm night command."""
        self._client.arm_stay_night(self._name)
| apache-2.0 |
iguzu/gae-django | django/contrib/gis/gdal/prototypes/generation.py | 321 | 3766 | """
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.prototypes.errcheck import \
check_arg_errcode, check_errcode, check_geom, check_geom_offset, \
check_pointer, check_srs, check_str_arg, check_string, check_const_string
class gdal_char_p(c_char_p):
    # Marker subclass of c_char_p used for routines that return a string
    # allocated by GDAL: the error-checking routine can recognize this
    # type and free the memory at the pointer's address (via VSIFree,
    # see string_output with str_result=True).
    pass
def double_output(func, argtypes, errcheck=False, strarg=False):
    """
    Configure `func` as a ctypes routine returning a C double.

    `errcheck` installs the error-code checker and `strarg` installs the
    string-argument checker (the latter overrides the former when both
    are requested).  Returns the configured function.
    """
    func.restype = c_double
    func.argtypes = argtypes
    if errcheck:
        func.errcheck = check_arg_errcode
    if strarg:
        func.errcheck = check_str_arg
    return func
def geom_output(func, argtypes, offset=None):
    """
    Configure `func` so it yields an OGR Geometry.

    Without an `offset` the routine returns the geometry pointer
    directly; with an `offset` it returns an error code and the geometry
    comes back by reference in the argument at that offset.
    """
    func.argtypes = argtypes
    if offset:
        # Error code returned; geometry is returned by-reference.
        func.restype = c_int

        def geomerrcheck(result, func, cargs):
            return check_geom_offset(result, func, cargs, offset)
        func.errcheck = geomerrcheck
    else:
        # A geometry pointer is directly returned.
        func.restype = c_void_p
        func.errcheck = check_geom
    return func
def int_output(func, argtypes):
    """Configure `func` as a ctypes routine returning a C int."""
    func.restype = c_int
    func.argtypes = argtypes
    return func
def srs_output(func, argtypes):
    """
    Configure `func` with the given C argument types so that it returns
    a pointer to an OGR Spatial Reference System, validated by the SRS
    error-checking routine.
    """
    func.restype = c_void_p
    func.argtypes = argtypes
    func.errcheck = check_srs
    return func
def const_string_output(func, argtypes, offset=None):
    """
    Configure `func` to yield a constant string.

    With an `offset` the routine returns a status code and the string is
    retrieved by reference; otherwise the string is returned directly.
    """
    func.argtypes = argtypes
    func.restype = c_int if offset else c_char_p

    def _check_const(result, func, cargs):
        return check_const_string(result, func, cargs, offset=offset)
    func.errcheck = _check_const
    return func
def string_output(func, argtypes, offset=-1, str_result=False):
    """
    Configure `func` to produce a Python string from a GDAL pointer.

    When `str_result` is true the routine itself returns the string (as
    a pointer whose memory may later be freed via the GDAL VSIFree
    routine); otherwise it returns an error code and the string comes
    back by reference at `offset`.
    """
    func.argtypes = argtypes
    # gdal_char_p is a c_char_p subclass, letting the error-checking
    # routine free the memory at the pointer's address when the routine
    # returns the string directly.
    func.restype = gdal_char_p if str_result else c_int

    def _check_str(result, func, cargs):
        # Bind the configured offset/str_result into the checker.
        return check_string(result, func, cargs,
                            offset=offset, str_result=str_result)
    func.errcheck = _check_str
    return func
def void_output(func, argtypes, errcheck=True):
    """
    Configure `func` for routines that return either nothing at all or
    only a status code that needs to be examined (`errcheck=True`).
    """
    if argtypes:
        func.argtypes = argtypes
    if not errcheck:
        # Routine genuinely returns void.
        func.restype = None
    else:
        # A status code is returned and must be validated.
        func.restype = c_int
        func.errcheck = check_errcode
    return func
def voidptr_output(func, argtypes):
    """Configure `func` to return a c_void_p checked for NULL."""
    func.restype = c_void_p
    func.argtypes = argtypes
    func.errcheck = check_pointer
    return func
| bsd-3-clause |
watermelo/libcloud | example_loadbalancer.py | 58 | 2483 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import os
import time

from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.types import Provider, State
from libcloud.loadbalancer.providers import get_driver
def main():
    """Demonstrate the libcloud load-balancer API against Rackspace."""
    cls = get_driver(Provider.RACKSPACE)
    driver = cls('username', 'api key', region='ord')

    balancers = driver.list_balancers()
    print(balancers)

    # Creating a balancer which balances traffic across two
    # nodes: 192.168.86.1:80 and 192.168.86.2:8080. Balancer
    # itself listens on port 80/tcp.
    # Fix: bytes.encode('hex') is Python-2-only (bytes has no .encode and
    # the 'hex' codec was removed in Python 3); binascii.hexlify behaves
    # identically on both versions.
    suffix = binascii.hexlify(os.urandom(4)).decode('ascii')
    new_balancer_name = 'testlb' + suffix
    members = (Member(None, '192.168.86.1', 80),
               Member(None, '192.168.86.2', 8080))
    new_balancer = driver.create_balancer(name=new_balancer_name,
                                          algorithm=Algorithm.ROUND_ROBIN,
                                          port=80,
                                          protocol='http',
                                          members=members)
    print(new_balancer)

    # wait for balancer to become ready
    # NOTE: in real life code add timeout to not end up in
    # endless loop when things go wrong on provider side
    while True:
        balancer = driver.get_balancer(balancer_id=new_balancer.id)
        if balancer.state == State.RUNNING:
            break
        print('sleeping for 30 seconds for balancers to become ready')
        time.sleep(30)

    # fetch list of members
    members = balancer.list_members()
    print(members)

    # remove first member
    balancer.detach_member(members[0])

    # remove the balancer
    driver.destroy_balancer(new_balancer)
# Run the demo only when this file is executed as a script.
if __name__ == '__main__':
    main()
| apache-2.0 |
w1kke/pylearn2 | pylearn2/linear/tests/test_conv2d.py | 45 | 5497 | import theano
from theano import tensor
import numpy
from pylearn2.linear.conv2d import Conv2D, make_random_conv2D
from pylearn2.space import Conv2DSpace
from pylearn2.utils import sharedX
import unittest
try:
scipy_available = True
import scipy.ndimage
except ImportError:
scipy_available = False
class TestConv2D(unittest.TestCase):
    """
    Tests for Conv2D code
    """
    def setUp(self):
        """
        Set up a test image and filter to re-use
        """
        # One 3x3 single-channel image (axes 'b', 0, 1, 'c') and a single
        # 2x2 all-ones filter, shared by the tests below.
        self.image = numpy.random.rand(1, 3, 3, 1).astype(theano.config.floatX)
        self.image_tensor = tensor.tensor4()
        self.input_space = Conv2DSpace((3, 3), 1)
        self.filters_values = numpy.ones(
            (1, 1, 2, 2), dtype=theano.config.floatX
        )
        self.filters = sharedX(self.filters_values, name='filters')
        self.conv2d = Conv2D(self.filters, 1, self.input_space)
    def test_value_errors(self):
        """
        Check correct errors are raised when bad input is given
        """
        # Filters must be a 4D tensor; a 3D shared variable is rejected.
        bad_filters = sharedX(numpy.zeros((1, 3, 2)))
        self.assertRaises(ValueError, Conv2D, bad_filters, 1, self.input_space)
        # A batch size of zero is rejected as well.
        self.assertRaises(AssertionError, Conv2D, self.filters, 0,
                          self.input_space)
    def test_get_params(self):
        """
        Check whether the conv2d has stored the correct filters
        """
        assert self.conv2d.get_params() == [self.filters]
    def test_lmul(self):
        """
        Use SciPy's ndimage to check whether the convolution worked
        correctly
        """
        f = theano.function([self.image_tensor],
                            self.conv2d.lmul(self.image_tensor))
        if scipy_available:
            # NOTE(review): the return value of numpy.allclose is discarded,
            # so this comparison can never fail the test.  Wrap it in an
            # assert (or use numpy.testing.assert_allclose) once the theano
            # and scipy outputs are confirmed to agree on the sliced region.
            numpy.allclose(
                f(self.image).reshape((2, 2)),
                scipy.ndimage.filters.convolve(
                    self.image.reshape((3, 3)),
                    self.filters_values.reshape((2, 2))
                )[:2, :2]
            )
    def test_lmul_T(self):
        """
        Check whether this function outputs the right shape
        """
        # lmul_T maps the convolved output back to the input image's shape.
        conv2d = self.conv2d.lmul(self.image_tensor)
        f = theano.function([self.image_tensor],
                            self.conv2d.lmul_T(conv2d))
        assert f(self.image).shape == self.image.shape
    def test_lmul_sq_T(self):
        """
        Check whether this function outputs the same values as when
        taking the square manually
        """
        # lmul_sq_T should equal lmul_T computed with squared filters.
        conv2d_sq = Conv2D(sharedX(numpy.square(self.filters_values)),
                           1, self.input_space
                           ).lmul(self.image_tensor)
        conv2d = self.conv2d.lmul(self.image_tensor)
        f = theano.function([self.image_tensor],
                            self.conv2d.lmul_T(conv2d_sq))
        f2 = theano.function([self.image_tensor],
                             self.conv2d.lmul_sq_T(conv2d))
        numpy.testing.assert_allclose(f(self.image), f2(self.image))
    def test_set_batch_size(self):
        """
        Make sure that setting the batch size actually changes the property
        """
        cur_img_shape = self.conv2d._img_shape
        cur_batch_size = self.conv2d._img_shape[0]
        self.conv2d.set_batch_size(cur_batch_size + 10)
        # Only the batch axis of the stored image shape should change.
        assert self.conv2d._img_shape[0] == cur_batch_size + 10
        assert self.conv2d._img_shape[1:] == cur_img_shape[1:]
    def test_axes(self):
        """
        Use different output axes and see whether the output is what we
        expect
        """
        default_axes = ('b', 0, 1, 'c')
        axes = (0, 'b', 1, 'c')
        # Index permutation taking the default axis order to `axes`.
        mapping = tuple(axes.index(axis) for axis in default_axes)
        input_space = Conv2DSpace((3, 3), num_channels=1, axes=axes)
        conv2d = Conv2D(self.filters, 1, input_space, output_axes=axes)
        f_axes = theano.function([self.image_tensor],
                                 conv2d.lmul(self.image_tensor))
        f = theano.function([self.image_tensor],
                            self.conv2d.lmul(self.image_tensor))
        # Run the permuted-axes op on a permuted image, undo the
        # permutation, and compare against the default-axes result.
        output_axes = f_axes(numpy.transpose(self.image, mapping))
        output = f(self.image)
        output_axes = numpy.transpose(output_axes, mapping)
        numpy.testing.assert_allclose(output, output_axes)
        assert output.shape == output_axes.shape
    def test_channels(self):
        """
        Go from 2 to 3 channels and see whether the shape is correct
        """
        input_space = Conv2DSpace((3, 3), num_channels=3)
        filters_values = numpy.ones(
            (2, 3, 2, 2), dtype=theano.config.floatX
        )
        filters = sharedX(filters_values)
        image = numpy.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
        conv2d = Conv2D(filters, 1, input_space)
        f = theano.function([self.image_tensor],
                            conv2d.lmul(self.image_tensor))
        # Two 2x2 filters over a 3x3 image yield a (batch, 2, 2, 2) output.
        assert f(image).shape == (1, 2, 2, 2)
    def test_make_random_conv2D(self):
        """
        Create a random convolution and check whether the shape, axes and
        input space are all what we expect
        """
        output_space = Conv2DSpace((2, 2), 1)
        conv2d = make_random_conv2D(1, self.input_space, output_space,
                                    (2, 2), 1)
        f = theano.function([self.image_tensor],
                            conv2d.lmul(self.image_tensor))
        assert f(self.image).shape == (1, 2, 2, 1)
        assert conv2d.input_space == self.input_space
        assert conv2d.output_axes == output_space.axes
| bsd-3-clause |
pedja1/aNmap | dSploit/jni/nmap/zenmap/zenmapGUI/higwidgets/higprogressbars.py | 4 | 9819 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@nmap.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of this license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@nmap.com for further *
# * information. *
# * *
# * If you have received a written license agreement or contract for *
# * Covered Software stating terms other than these, you may choose to use *
# * and redistribute Covered Software under those terms instead of these. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
"""
higwidgets/higprogressbars.py
progress bars classes
"""
__all__ = ['HIGLabeledProgressBar']
import gtk
from higboxes import HIGHBox
class HIGLabeledProgressBar(HIGHBox):
    """Horizontal box pairing an optional text label with a small progress bar."""
    def __init__(self, label=None):
        """Create the box; `label`, if given, is packed before the bar."""
        HIGHBox.__init__(self)
        if label:
            # NOTE(review): HIGEntryLabel is not imported in this module
            # (only HIGHBox is, from higboxes), so passing a label here
            # raises NameError — confirm and import it from the
            # higwidgets labels module.
            self.label = HIGEntryLabel(label)
            self.pack_label(self.label)
        self.progress_bar = gtk.ProgressBar()
        # Keep the bar compact (80x16 pixels).
        self.progress_bar.set_size_request(80, 16)
        self.pack_label(self.progress_bar)
    def show(self):
        # Override show() so that the box's children are shown as well.
        HIGHBox.show_all(self)
| gpl-3.0 |
kutuhal/oracle-r12-accounting | lib/django/core/management/commands/migrate.py | 36 | 17808 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import time
import traceback
import warnings
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import (
custom_sql_for_model, emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ProjectState
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument('app_label', nargs='?',
help='App label of an application to synchronize the state.')
parser.add_argument('migration_name', nargs='?',
help=(
'Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.'
),
)
parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.')
parser.add_argument('--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them.')
parser.add_argument('--fake-initial', action='store_true', dest='fake_initial', default=False,
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.')
parser.add_argument('--list', '-l', action='store_true', dest='list', default=False,
help='Show a list of all known migrations and which are applied.')
    def handle(self, *args, **options):
        """
        Entry point: plan migrations toward the requested target state,
        run the legacy syncdb phase for apps without migrations when no
        explicit target was supplied, then apply (or unapply) the plan.
        """
        self.verbosity = options.get('verbosity')
        self.interactive = options.get('interactive')
        self.show_traceback = options.get('traceback')
        self.load_initial_data = options.get('load_initial_data')
        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_config in apps.get_app_configs():
            if module_has_submodule(app_config.module, "management"):
                import_module('.management', app_config.name)
        # Get the database we're operating from
        db = options.get('database')
        connection = connections[db]
        # If they asked for a migration listing, quit main execution flow and show it
        if options.get("list", False):
            warnings.warn(
                "The 'migrate --list' command is deprecated. Use 'showmigrations' instead.",
                RemovedInDjango110Warning, stacklevel=2)
            self.stdout.ending = None  # Remove when #21429 is fixed
            # Delegate entirely to the replacement command and return its result.
            return call_command(
                'showmigrations',
                '--list',
                app_labels=[options['app_label']] if options['app_label'] else None,
                database=db,
                no_color=options.get('no_color'),
                settings=options.get('settings'),
                stdout=self.stdout,
                traceback=self.show_traceback,
                verbosity=self.verbosity,
            )
        # Hook for backends needing any database preparation
        connection.prepare_database()
        # Work out which apps have migrations and which do not
        executor = MigrationExecutor(connection, self.migration_progress_callback)
        # Before anything else, see if there's conflicting apps and drop out
        # hard if there are any
        conflicts = executor.loader.detect_conflicts()
        if conflicts:
            name_str = "; ".join(
                "%s in %s" % (", ".join(names), app)
                for app, names in conflicts.items()
            )
            raise CommandError(
                "Conflicting migrations detected (%s).\nTo fix them run "
                "'python manage.py makemigrations --merge'" % name_str
            )
        # If they supplied command line arguments, work out what they mean.
        run_syncdb = False
        target_app_labels_only = True
        if options['app_label'] and options['migration_name']:
            app_label, migration_name = options['app_label'], options['migration_name']
            if app_label not in executor.loader.migrated_apps:
                raise CommandError(
                    "App '%s' does not have migrations (you cannot selectively "
                    "sync unmigrated apps)" % app_label
                )
            if migration_name == "zero":
                # (app_label, None) means "unapply everything for this app".
                targets = [(app_label, None)]
            else:
                try:
                    migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
                except AmbiguityError:
                    raise CommandError(
                        "More than one migration matches '%s' in app '%s'. "
                        "Please be more specific." %
                        (migration_name, app_label)
                    )
                except KeyError:
                    raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
                        migration_name, app_label))
                targets = [(app_label, migration.name)]
            target_app_labels_only = False
        elif options['app_label']:
            app_label = options['app_label']
            if app_label not in executor.loader.migrated_apps:
                raise CommandError(
                    "App '%s' does not have migrations (you cannot selectively "
                    "sync unmigrated apps)" % app_label
                )
            targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
        else:
            # No arguments: migrate everything, and also sync unmigrated apps.
            targets = executor.loader.graph.leaf_nodes()
            run_syncdb = True
        plan = executor.migration_plan(targets)
        # Print some useful info
        if self.verbosity >= 1:
            self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
            if run_syncdb and executor.loader.unmigrated_apps:
                self.stdout.write(
                    self.style.MIGRATE_LABEL("  Synchronize unmigrated apps: ") +
                    (", ".join(executor.loader.unmigrated_apps))
                )
            if target_app_labels_only:
                self.stdout.write(
                    self.style.MIGRATE_LABEL("  Apply all migrations: ") +
                    (", ".join(set(a for a, n in targets)) or "(none)")
                )
            else:
                if targets[0][1] is None:
                    self.stdout.write(self.style.MIGRATE_LABEL(
                        "  Unapply all migrations: ") + "%s" % (targets[0][0], )
                    )
                else:
                    self.stdout.write(self.style.MIGRATE_LABEL(
                        "  Target specific migration: ") + "%s, from %s"
                        % (targets[0][1], targets[0][0])
                    )
        # Run the syncdb phase.
        # If you ever manage to get rid of this, I owe you many, many drinks.
        # Note that pre_migrate is called from inside here, as it needs
        # the list of models about to be installed.
        if run_syncdb and executor.loader.unmigrated_apps:
            if self.verbosity >= 1:
                self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
            created_models = self.sync_apps(connection, executor.loader.unmigrated_apps)
        else:
            created_models = []
            # No syncdb phase ran, so pre_migrate has not been emitted yet.
            emit_pre_migrate_signal([], self.verbosity, self.interactive, connection.alias)
        # The test runner requires us to flush after a syncdb but before migrations,
        # so do that here.
        if options.get("test_flush", False):
            call_command(
                'flush',
                verbosity=max(self.verbosity - 1, 0),
                interactive=False,
                database=db,
                reset_sequences=False,
                inhibit_post_migrate=True,
            )
        # Migrate!
        if self.verbosity >= 1:
            self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
        if not plan:
            executor.check_replacements()
            if self.verbosity >= 1:
                self.stdout.write("  No migrations to apply.")
                # If there's changes that aren't in migrations yet, tell them how to fix it.
                autodetector = MigrationAutodetector(
                    executor.loader.project_state(),
                    ProjectState.from_apps(apps),
                )
                changes = autodetector.changes(graph=executor.loader.graph)
                if changes:
                    self.stdout.write(self.style.NOTICE(
                        "  Your models have changes that are not yet reflected "
                        "in a migration, and so won't be applied."
                    ))
                    self.stdout.write(self.style.NOTICE(
                        "  Run 'manage.py makemigrations' to make new "
                        "migrations, and then re-run 'manage.py migrate' to "
                        "apply them."
                    ))
        else:
            fake = options.get("fake")
            fake_initial = options.get("fake_initial")
            executor.migrate(targets, plan, fake=fake, fake_initial=fake_initial)
        # Send the post_migrate signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_migrate_signal(created_models, self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
self.stdout.write(self.style.MIGRATE_SUCCESS(" DONE" + elapsed))
    def sync_apps(self, connection, app_labels):
        "Runs the old syncdb-style operation on a list of app_labels."
        cursor = connection.cursor()
        try:
            # Get a list of already installed *models* so that references work right.
            tables = connection.introspection.table_names(cursor)
            created_models = set()
            # Build the manifest of apps and models that are to be synchronized
            all_models = [
                (app_config.label,
                    router.get_migratable_models(app_config, connection.alias, include_auto_created=False))
                for app_config in apps.get_app_configs()
                if app_config.models_module is not None and app_config.label in app_labels
            ]
            def model_installed(model):
                # True when neither the model's table nor its auto-created
                # companion table already exists in the database.
                opts = model._meta
                converter = connection.introspection.table_name_converter
                # Note that if a model is unmanaged we short-circuit and never try to install it
                return not ((converter(opts.db_table) in tables) or
                    (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
            manifest = OrderedDict(
                (app_name, list(filter(model_installed, model_list)))
                for app_name, model_list in all_models
            )
            create_models = set(itertools.chain(*manifest.values()))
            # pre_migrate needs the set of models about to be installed.
            emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)
            # Create the tables for each model
            if self.verbosity >= 1:
                self.stdout.write("  Creating tables...\n")
            with transaction.atomic(using=connection.alias, savepoint=connection.features.can_rollback_ddl):
                deferred_sql = []
                for app_name, model_list in manifest.items():
                    for model in model_list:
                        if model._meta.proxy or not model._meta.managed:
                            continue
                        if self.verbosity >= 3:
                            self.stdout.write(
                                "    Processing %s.%s model\n" % (app_name, model._meta.object_name)
                            )
                        with connection.schema_editor() as editor:
                            if self.verbosity >= 1:
                                self.stdout.write("    Creating table %s\n" % model._meta.db_table)
                            editor.create_model(model)
                            # Collect deferred statements (e.g. FK constraints) to
                            # run after all tables exist; clear them so the editor's
                            # exit hook does not execute them a second time.
                            deferred_sql.extend(editor.deferred_sql)
                            editor.deferred_sql = []
                            created_models.add(model)
                if self.verbosity >= 1:
                    self.stdout.write("    Running deferred SQL...\n")
                for statement in deferred_sql:
                    cursor.execute(statement)
        finally:
            cursor.close()
        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()
        try:
            # Install custom SQL for the app (but only if this
            # is a model we've just created)
            if self.verbosity >= 1:
                self.stdout.write("  Installing custom SQL...\n")
            for app_name, model_list in manifest.items():
                for model in model_list:
                    if model in created_models:
                        custom_sql = custom_sql_for_model(model, no_style(), connection)
                        if custom_sql:
                            if self.verbosity >= 2:
                                self.stdout.write(
                                    "    Installing custom SQL for %s.%s model\n" %
                                    (app_name, model._meta.object_name)
                                )
                            try:
                                with transaction.atomic(using=connection.alias):
                                    for sql in custom_sql:
                                        cursor.execute(sql)
                            except Exception as e:
                                # Custom SQL failures are reported but deliberately
                                # do not abort the rest of the sync.
                                self.stderr.write(
                                    "    Failed to install custom SQL for %s.%s model: %s\n"
                                    % (app_name, model._meta.object_name, e)
                                )
                                if self.show_traceback:
                                    traceback.print_exc()
                        else:
                            if self.verbosity >= 3:
                                self.stdout.write(
                                    "    No custom SQL for %s.%s model\n" %
                                    (app_name, model._meta.object_name)
                                )
        finally:
            cursor.close()
        # Load initial_data fixtures (unless that has been disabled)
        if self.load_initial_data:
            for app_label in app_labels:
                call_command(
                    'loaddata', 'initial_data', verbosity=self.verbosity,
                    database=connection.alias, app_label=app_label,
                    hide_empty=True,
                )
        return created_models
| bsd-3-clause |
zyq001/ryu | ryu/controller/ofp_handler.py | 18 | 11003 | # Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Basic OpenFlow handling including negotiation.
"""
import itertools
import logging
import warnings
import ryu.base.app_manager
from ryu.lib import hub
from ryu import utils
from ryu.controller import ofp_event
from ryu.controller.controller import OpenFlowController
from ryu.controller.handler import set_ev_handler
from ryu.controller.handler import HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER,\
MAIN_DISPATCHER
# The state transition: HANDSHAKE -> CONFIG -> MAIN
#
# HANDSHAKE: if it receives HELLO message with the valid OFP version,
# sends Features Request message, and moves to CONFIG.
#
# CONFIG: it receives Features Reply message and moves to MAIN
#
# MAIN: it does nothing. Applications are expected to register their
# own handlers.
#
# Note that at any state, when we receive Echo Request message, send
# back Echo Reply message.
class OFPHandler(ryu.base.app_manager.RyuApp):
    """Ryu application performing basic OpenFlow handling.

    Drives the HANDSHAKE -> CONFIG -> MAIN dispatcher state machine
    described above and spawns the OpenFlow controller loop.
    """
    def __init__(self, *args, **kwargs):
        super(OFPHandler, self).__init__(*args, **kwargs)
        # Name under which other Ryu apps address this app's events.
        self.name = 'ofp_event'
    def start(self):
        """Start the app and spawn the OpenFlow controller green thread."""
        super(OFPHandler, self).start()
        # NOTE(review): the OpenFlowController *instance* is handed to
        # hub.spawn(); this relies on the instance being callable — confirm.
        return hub.spawn(OpenFlowController())
def _hello_failed(self, datapath, error_desc):
self.logger.error(error_desc)
error_msg = datapath.ofproto_parser.OFPErrorMsg(datapath)
error_msg.type = datapath.ofproto.OFPET_HELLO_FAILED
error_msg.code = datapath.ofproto.OFPHFC_INCOMPATIBLE
error_msg.data = error_desc
datapath.send_msg(error_msg)
    @set_ev_handler(ofp_event.EventOFPHello, HANDSHAKE_DISPATCHER)
    def hello_handler(self, ev):
        """Negotiate the OpenFlow version from an OFPT_HELLO message.

        When the hello carries a VERSIONBITMAP element, the usable version
        is the highest one common to both sides; otherwise the plain
        header-version rule of OF1.3.1 section 6.3.1 applies. On success a
        Features Request is sent and the state machine moves to CONFIG; on
        failure an OFPET_HELLO_FAILED error is sent back to the switch.
        """
        self.logger.debug('hello ev %s', ev)
        msg = ev.msg
        datapath = msg.datapath
        # check if received version is supported.
        # pre 1.0 is not supported
        elements = getattr(msg, 'elements', None)
        if elements:
            switch_versions = set()
            for version in itertools.chain.from_iterable(
                    element.versions for element in elements):
                switch_versions.add(version)
            usable_versions = switch_versions & set(
                datapath.supported_ofp_version)
            # We didn't send our supported versions for interoperability as
            # most switches would not understand elements at the moment.
            # So the switch would think that the negotiated version would
            # be max(negotiated_versions), but actual usable version is
            # max(usable_versions).
            negotiated_versions = set(
                version for version in switch_versions
                if version <= max(datapath.supported_ofp_version))
            if negotiated_versions and not usable_versions:
                # e.g.
                # versions of OF 1.0 and 1.1 from switch
                # max of OF 1.2 from Ryu and supported_ofp_version = (1.2, )
                # negotiated version = 1.1
                # usable version = None
                error_desc = (
                    'no compatible version found: '
                    'switch versions %s controller version 0x%x, '
                    'the negotiated version is 0x%x, '
                    'but no usable version found. '
                    'If possible, set the switch to use one of OF version %s'
                    % (switch_versions, max(datapath.supported_ofp_version),
                       max(negotiated_versions),
                       sorted(datapath.supported_ofp_version)))
                self._hello_failed(datapath, error_desc)
                return
            if (negotiated_versions and usable_versions and
                    max(negotiated_versions) != max(usable_versions)):
                # e.g.
                # versions of OF 1.0 and 1.1 from switch
                # max of OF 1.2 from Ryu and supported_ofp_version = (1.0, 1.2)
                # negotiated version = 1.1
                # usable version = 1.0
                #
                # TODO: In order to get the version 1.0, Ryu need to send
                # supported versions.
                error_desc = (
                    'no compatible version found: '
                    'switch versions 0x%x controller version 0x%x, '
                    'the negotiated version is %s but found usable %s. '
                    'If possible, '
                    'set the switch to use one of OF version %s' % (
                        max(switch_versions),
                        max(datapath.supported_ofp_version),
                        sorted(negotiated_versions),
                        sorted(usable_versions), sorted(usable_versions)))
                self._hello_failed(datapath, error_desc)
                return
        else:
            # No VERSIONBITMAP element: fall back to the header version.
            usable_versions = set(version for version
                                  in datapath.supported_ofp_version
                                  if version <= msg.version)
            if (usable_versions and
                max(usable_versions) != min(msg.version,
                                            datapath.ofproto.OFP_VERSION)):
                # The version of min(msg.version, datapath.ofproto.OFP_VERSION)
                # should be used according to the spec. But we can't.
                # So log it and use max(usable_versions) with the hope that
                # the switch is able to understand lower version.
                # e.g.
                # OF 1.1 from switch
                # OF 1.2 from Ryu and supported_ofp_version = (1.0, 1.2)
                # In this case, 1.1 should be used according to the spec,
                # but 1.1 can't be used.
                #
                # OF1.3.1 6.3.1
                # Upon receipt of this message, the recipient must
                # calculate the OpenFlow protocol version to be used. If
                # both the Hello message sent and the Hello message
                # received contained a OFPHET_VERSIONBITMAP hello element,
                # and if those bitmaps have some common bits set, the
                # negotiated version must be the highest version set in
                # both bitmaps. Otherwise, the negotiated version must be
                # the smaller of the version number that was sent and the
                # one that was received in the version fields. If the
                # negotiated version is supported by the recipient, then
                # the connection proceeds. Otherwise, the recipient must
                # reply with an OFPT_ERROR message with a type field of
                # OFPET_HELLO_FAILED, a code field of OFPHFC_INCOMPATIBLE,
                # and optionally an ASCII string explaining the situation
                # in data, and then terminate the connection.
                version = max(usable_versions)
                error_desc = (
                    'no compatible version found: '
                    'switch 0x%x controller 0x%x, but found usable 0x%x. '
                    'If possible, set the switch to use OF version 0x%x' % (
                        msg.version, datapath.ofproto.OFP_VERSION,
                        version, version))
                self._hello_failed(datapath, error_desc)
                return
        if not usable_versions:
            error_desc = (
                'unsupported version 0x%x. '
                'If possible, set the switch to use one of the versions %s' % (
                    msg.version, sorted(datapath.supported_ofp_version)))
            self._hello_failed(datapath, error_desc)
            return
        datapath.set_version(max(usable_versions))
        # now send feature
        features_reqeust = datapath.ofproto_parser.OFPFeaturesRequest(datapath)
        datapath.send_msg(features_reqeust)
        # now move on to config state
        self.logger.debug('move onto config mode')
        datapath.set_state(CONFIG_DISPATCHER)
    @set_ev_handler(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Record datapath id/ports, push the switch config, advance state.

        For pre-OF1.3 the features reply carries ports and the state
        machine moves to MAIN immediately; for OF1.3+ a port-description
        stats request is issued first and MAIN is entered from
        multipart_reply_handler().
        """
        msg = ev.msg
        datapath = msg.datapath
        self.logger.debug('switch features ev %s', msg)
        datapath.id = msg.datapath_id
        # hacky workaround, will be removed. OF1.3 doesn't have
        # ports. An application should not depend on them. But there
        # might be such bad applications so keep this workaround for
        # while.
        if datapath.ofproto.OFP_VERSION < 0x04:
            datapath.ports = msg.ports
        else:
            datapath.ports = {}
        ofproto = datapath.ofproto
        ofproto_parser = datapath.ofproto_parser
        set_config = ofproto_parser.OFPSetConfig(
            datapath, ofproto.OFPC_FRAG_NORMAL,
            128  # TODO:XXX miss_send_len; confirm why 128 was chosen
        )
        datapath.send_msg(set_config)
        if datapath.ofproto.OFP_VERSION < 0x04:
            self.logger.debug('move onto main mode')
            ev.msg.datapath.set_state(MAIN_DISPATCHER)
        else:
            port_desc = datapath.ofproto_parser.OFPPortDescStatsRequest(
                datapath, 0)
            datapath.send_msg(port_desc)
    @set_ev_handler(ofp_event.EventOFPPortDescStatsReply, CONFIG_DISPATCHER)
    def multipart_reply_handler(self, ev):
        """Collect OF1.3+ port descriptions, then move to MAIN state.

        Waits until the final part of the multipart reply (i.e. the
        OFPMPF_REPLY_MORE flag is clear) before switching the dispatcher.
        """
        msg = ev.msg
        datapath = msg.datapath
        # Suppress deprecation warnings triggered by touching datapath.ports.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            for port in msg.body:
                datapath.ports[port.port_no] = port
        if msg.flags & datapath.ofproto.OFPMPF_REPLY_MORE:
            return
        self.logger.debug('move onto main mode')
        ev.msg.datapath.set_state(MAIN_DISPATCHER)
@set_ev_handler(ofp_event.EventOFPEchoRequest,
[HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def echo_request_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
echo_reply = datapath.ofproto_parser.OFPEchoReply(datapath)
echo_reply.xid = msg.xid
echo_reply.data = msg.data
datapath.send_msg(echo_reply)
    @set_ev_handler(ofp_event.EventOFPErrorMsg,
                    [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
    def error_msg_handler(self, ev):
        """Log any OFPT_ERROR received, in any dispatcher state."""
        msg = ev.msg
        self.logger.debug('error msg ev %s type 0x%x code 0x%x %s',
                          msg, msg.type, msg.code, utils.hex_array(msg.data))
| apache-2.0 |
ahnitz/pycbc | pycbc/rate.py | 14 | 12540 | import numpy
import bisect
from . import bin_utils
def integral_element(mu, pdf):
    '''
    Trapezium-rule elements dP = p(mu) dmu for a density sampled at the
    (not necessarily equally spaced) points mu. The result has one fewer
    entry than the number of mu samples.
    '''
    widths = numpy.diff(mu)
    heights = (pdf[:-1] + pdf[1:]) / 2.
    return widths * heights
def normalize_pdf(mu, pofmu):
    """
    Normalize the sampled function pofmu (defined at rate samples mu) so
    that it integrates to unity, returning the pair (mu, normalized pdf).
    Both inputs must be equal-length arrays/lists with non-negative values.
    """
    if min(pofmu) < 0:
        raise ValueError("Probabilities cannot be negative, don't ask me to "
                         "normalize a function with negative values!")
    if min(mu) < 0:
        raise ValueError("Rates cannot be negative, don't ask me to "
                         "normalize a function over a negative domain!")
    total = sum(integral_element(mu, pofmu))
    return mu, pofmu / total
def compute_upper_limit(mu_in, post, alpha=0.9):
    """
    Upper limit mu_high at confidence level alpha for the (possibly
    unnormalized) posterior post sampled at mu_in.
    """
    if alpha == 1:
        # Degenerate case: the largest sample with any posterior support.
        return numpy.max(mu_in[post > 0])
    if not 0 < alpha < 1:
        raise ValueError("Confidence level must be in (0,1].")
    elements = integral_element(mu_in, post)
    cdf = elements.cumsum() / elements.sum()
    # The CDF starts near 0 and ends at 1, so for alpha in (0,1]
    # bisect_left always returns an index within mu_in's range.
    return mu_in[bisect.bisect_left(cdf, alpha)]
def compute_lower_limit(mu_in, post, alpha=0.9):
    """
    Lower limit mu_low at confidence level alpha for the (possibly
    unnormalized) posterior post sampled at mu_in.
    """
    if alpha == 1:
        # Degenerate case: the smallest sample with any posterior support.
        return numpy.min(mu_in[post > 0])
    if not 0 < alpha < 1:
        raise ValueError("Confidence level must be in (0,1].")
    elements = integral_element(mu_in, post)
    cdf = elements.cumsum() / elements.sum()
    # The CDF starts near 0 and ends at 1, so for alpha in (0,1]
    # bisect_right always returns an index within mu_in's range.
    return mu_in[bisect.bisect_right(cdf, 1 - alpha)]
def confidence_interval_min_width(mu, post, alpha=0.9):
    '''
    Returns the minimal-width confidence interval [mu_low, mu_high] of
    confidence level alpha for a posterior distribution post on the parameter mu.
    '''
    if not 0 < alpha < 1:
        raise ValueError("Confidence level must be in (0,1).")
    # choose a step size for the sliding confidence window
    alpha_step = 0.01
    # initialize the lower and upper limits
    mu_low = numpy.min(mu)
    mu_high = numpy.max(mu)
    # find the smallest window (by delta-mu) stepping by dalpha
    for ai in numpy.arange(0, 1 - alpha, alpha_step):
        # Each (ml, mh) pair covers probability >= alpha: ai is trimmed
        # from the lower tail and the upper limit is pushed out by the
        # same amount, sliding the alpha-sized window across the posterior.
        ml = compute_lower_limit(mu, post, 1 - ai)
        mh = compute_upper_limit(mu, post, alpha + ai)
        if mh - ml < mu_high - mu_low:
            mu_low = ml
            mu_high = mh
    return mu_low, mu_high
def hpd_coverage(mu, pdf, thresh):
    '''
    Integrate pdf over mu, keeping only the trapezium elements whose mean
    density exceeds the given threshold; this is the coverage of the HPD
    (highest posterior density) region at that threshold.
    '''
    elements = integral_element(mu, pdf)
    bin_means = (pdf[:-1] + pdf[1:]) / 2.
    return numpy.sum(elements[bin_means > thresh])
def hpd_threshold(mu_in, post, alpha, tol):
    '''
    For a PDF post over samples mu_in, find a density
    threshold such that the region having higher density
    has coverage of at least alpha, and less than alpha
    plus a given tolerance.
    '''
    # normalize_pdf returns a (samples, pdf) pair; keep only the pdf.
    # (Previously the whole tuple was bound to norm_post and passed to
    # hpd_coverage, which raised a TypeError as soon as this function
    # was called.)
    _, norm_post = normalize_pdf(mu_in, post)
    # initialize bisection search over the density threshold
    p_minus = 0.0
    # Use the maximum of the *normalized* pdf as the upper bracket so the
    # search interval is guaranteed to contain the answer.
    p_plus = max(norm_post)
    while abs(hpd_coverage(mu_in, norm_post, p_minus) -
              hpd_coverage(mu_in, norm_post, p_plus)) >= tol:
        p_test = (p_minus + p_plus) / 2.
        # Coverage is evaluated on the normalized pdf throughout (the old
        # code mixed in the unnormalized posterior here, skewing the
        # comparison against alpha).
        if hpd_coverage(mu_in, norm_post, p_test) >= alpha:
            # test value was too low or just right
            p_minus = p_test
        else:
            # test value was too high
            p_plus = p_test
    # p_minus never goes above the required threshold and p_plus never goes below
    # thus on exiting p_minus is at or below the required threshold and the
    # difference in coverage is within tolerance
    return p_minus
def hpd_credible_interval(mu_in, post, alpha=0.9, tolerance=1e-3):
    '''
    Returns the minimum and maximum rate values of the HPD
    (Highest Posterior Density) credible interval for a posterior
    post defined at the sample values mu_in. Samples need not be
    uniformly spaced and posterior need not be normalized.
    Will not return a correct credible interval if the posterior
    is multimodal and the correct interval is not contiguous;
    in this case will over-cover by including the whole range from
    minimum to maximum mu.
    '''
    if alpha == 1:
        nonzero_samples = mu_in[post > 0]
        mu_low = numpy.min(nonzero_samples)
        mu_high = numpy.max(nonzero_samples)
    elif 0 < alpha < 1:
        # determine the highest PDF for which the region with
        # higher density has sufficient coverage
        pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance)
        samples_over_threshold = mu_in[post > pthresh]
        mu_low = numpy.min(samples_over_threshold)
        mu_high = numpy.max(samples_over_threshold)
    else:
        # Previously an alpha outside (0,1] fell through both branches and
        # crashed with an opaque UnboundLocalError at the return; fail with
        # the same message the other credible-interval helpers use.
        raise ValueError("Confidence level must be in (0,1].")
    return mu_low, mu_high
# Following functions are for the old pylal volume vs mass calculations
# These were replaced by 'imr_utils' functions now contained in sensitivity.py
# and bin_utils.py
def integrate_efficiency(dbins, eff, err=0, logbins=False):
    """
    Integrate a binned efficiency over distance to obtain the sensitive
    volume and its propagated error. ``dbins`` are the bin edges, ``eff``
    (and optionally ``err``) are per-bin values; with ``logbins`` the
    integral is taken in log-distance using geometric bin centres.
    """
    if logbins:
        widths = numpy.log(dbins[1:]) - numpy.log(dbins[:-1])
        # geometric (log-midpoint) bin centres
        centres = numpy.exp((numpy.log(dbins[1:]) + numpy.log(dbins[:-1])) / 2.)
        # integrand in log-distance: dV = 4*pi*r^3 * eff * dln(r)
        shell = 4. * numpy.pi * centres ** 3.
    else:
        widths = dbins[1:] - dbins[:-1]
        centres = (dbins[1:] + dbins[:-1]) / 2.
        # integrand in linear distance: dV = 4*pi*r^2 * eff * dr
        shell = 4. * numpy.pi * centres ** 2.
    vol = numpy.sum(shell * eff * widths)
    # propagate per-bin efficiency errors in quadrature
    verr = numpy.sqrt(numpy.sum((shell * err * widths) ** 2.))
    return vol, verr
def compute_efficiency(f_dist, m_dist, dbins):
    '''
    Compute the efficiency as a function of distance for the given sets of found
    and missed injection distances.
    Note that injections that do not fit into any dbin get lost :(
    '''
    nbins = len(dbins) - 1
    efficiency = numpy.zeros(nbins)
    error = numpy.zeros(nbins)
    for j in range(nbins):
        lo, hi = dbins[j], dbins[j + 1]
        n_found = numpy.sum((lo <= f_dist) * (f_dist < hi))
        n_missed = numpy.sum((lo <= m_dist) * (m_dist < hi))
        if n_found + n_missed == 0:
            # avoid divide by 0 in empty bins
            n_missed = 1.
        efficiency[j] = float(n_found) / (n_found + n_missed)
        # binomial standard error on the efficiency estimate
        error[j] = numpy.sqrt(efficiency[j] * (1 - efficiency[j]) /
                              (n_found + n_missed))
    return efficiency, error
def mean_efficiency_volume(found, missed, dbins):
    """
    Binned efficiency and sensitive volume for one injection set.
    Returns (efficiency, efficiency_error, volume, volume_error); with no
    found injections the efficiency is identically zero.
    """
    if len(found) == 0:
        # no efficiency here
        return numpy.zeros(len(dbins) - 1), numpy.zeros(len(dbins) - 1), 0, 0
    # only the injection distances matter for the efficiency curve
    found_distances = numpy.array([inj.distance for inj in found])
    missed_distances = numpy.array([inj.distance for inj in missed])
    # efficiency vs distance, then integrate it up to a volume
    efficiency, eff_error = compute_efficiency(found_distances, missed_distances, dbins)
    volume, vol_error = integrate_efficiency(dbins, efficiency, eff_error)
    return efficiency, eff_error, volume, vol_error
def filter_injections_by_mass(injs, mbins, bin_num, bin_type, bin_num2=None):
    '''
    For a given set of injections (sim_inspiral rows), return the subset
    of injections that fall within the given mass range.

    ``mbins`` is a binning object exposing lower()/upper() edge arrays;
    ``bin_num`` (and ``bin_num2`` for the 2-D "Mass1_Mass2" case) selects
    which bin along each axis; ``bin_type`` selects the mass coordinate.
    '''
    if bin_type == "Mass1_Mass2":
        # Rebuild the full edge arrays (lower edges plus the topmost upper
        # edge) for each component-mass axis.
        m1bins = numpy.concatenate((mbins.lower()[0],
                                    numpy.array([mbins.upper()[0][-1]])))
        m1lo = m1bins[bin_num]
        m1hi = m1bins[bin_num + 1]
        m2bins = numpy.concatenate((mbins.lower()[1],
                                    numpy.array([mbins.upper()[1][-1]])))
        m2lo = m2bins[bin_num2]
        m2hi = m2bins[bin_num2 + 1]
        # Accept either assignment of (mass1, mass2) to the two bins,
        # since component labelling is arbitrary.
        newinjs = [l for l in injs if
                   ((m1lo <= l.mass1 < m1hi and m2lo <= l.mass2 < m2hi) or
                    (m1lo <= l.mass2 < m1hi and m2lo <= l.mass1 < m2hi))]
        return newinjs
    # 1-D cases share a single edge array.
    mbins = numpy.concatenate((mbins.lower()[0],
                               numpy.array([mbins.upper()[0][-1]])))
    mlow = mbins[bin_num]
    mhigh = mbins[bin_num + 1]
    if bin_type == "Chirp_Mass":
        newinjs = [l for l in injs if (mlow <= l.mchirp < mhigh)]
    elif bin_type == "Total_Mass":
        newinjs = [l for l in injs if (mlow <= l.mass1 + l.mass2 < mhigh)]
    elif bin_type == "Component_Mass":
        # here it is assumed that m2 is fixed
        newinjs = [l for l in injs if (mlow <= l.mass1 < mhigh)]
    elif bin_type == "BNS_BBH":
        if bin_num in [0, 2]:
            # BNS/BBH case
            newinjs = [l for l in injs if
                       (mlow <= l.mass1 < mhigh and mlow <= l.mass2 < mhigh)]
        else:
            # NSBH
            newinjs = [l for l in injs if (mbins[0] <= l.mass1 < mbins[1] and
                                           mbins[2] <= l.mass2 < mbins[3])]
            # BHNS
            newinjs += [l for l in injs if (mbins[0] <= l.mass2 < mbins[1] and
                                            mbins[2] <= l.mass1 < mbins[3])]
    return newinjs
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None):
    """
    Compute the average luminosity an experiment was sensitive to
    Assumes that luminosity is uniformly distributed in space.
    Input is the sets of found and missed injections.

    Returns (volume, volume_error, found_count, missed_count,
    efficiency-vs-distance list, efficiency-error list), each binned
    over mass_bins.
    """
    # mean and std estimate for luminosity
    volArray = bin_utils.BinnedArray(mass_bins)
    vol2Array = bin_utils.BinnedArray(mass_bins)
    # found/missed stats
    foundArray = bin_utils.BinnedArray(mass_bins)
    missedArray = bin_utils.BinnedArray(mass_bins)
    # compute the mean luminosity in each mass bin
    effvmass = []
    errvmass = []
    # 2D case first
    if bin_type == "Mass1_Mass2":
        for j, mc1 in enumerate(mass_bins.centres()[0]):
            for k, mc2 in enumerate(mass_bins.centres()[1]):
                # restrict the injection sets to this (m1, m2) bin
                newfound = filter_injections_by_mass(
                    found, mass_bins, j, bin_type, k)
                newmissed = filter_injections_by_mass(
                    missed, mass_bins, j, bin_type, k)
                foundArray[(mc1, mc2)] = len(newfound)
                missedArray[(mc1, mc2)] = len(newmissed)
                # compute the volume using this injection set
                meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
                    newfound, newmissed, dbins)
                effvmass.append(meaneff)
                errvmass.append(efferr)
                volArray[(mc1, mc2)] = meanvol
                vol2Array[(mc1, mc2)] = volerr
        return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
    # 1-D mass binnings
    for j, mc in enumerate(mass_bins.centres()[0]):
        # filter out injections not in this mass bin
        newfound = filter_injections_by_mass(found, mass_bins, j, bin_type)
        newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type)
        foundArray[(mc, )] = len(newfound)
        missedArray[(mc, )] = len(newmissed)
        # compute the volume using this injection set
        meaneff, efferr, meanvol, volerr = mean_efficiency_volume(
            newfound, newmissed, dbins)
        effvmass.append(meaneff)
        errvmass.append(efferr)
        volArray[(mc, )] = meanvol
        vol2Array[(mc, )] = volerr
    return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
| gpl-3.0 |
westinedu/newertrends | mezzanine/utils/email.py | 4 | 2334 |
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template import loader, Context
from django.utils.http import int_to_base36
from mezzanine.conf import settings
def send_mail_template(subject, template, addr_from, addr_to, context=None,
                       attachments=None, fail_silently=False):
    """
    Send email rendering text and html versions for the specified
    template name using the context dictionary passed in.

    ``template`` is the template path without extension; "<template>.txt"
    and "<template>.html" are rendered as the plain-text body and the
    HTML alternative respectively. ``addr_to`` may be a single address or
    an iterable of addresses; ``attachments`` is an iterable of argument
    tuples for EmailMessage.attach().
    """
    if context is None:
        context = {}
    if attachments is None:
        attachments = []
    # Allow for a single address to be passed in.
    if not hasattr(addr_to, "__iter__"):
        addr_to = [addr_to]

    # Render "<template>.<ext>" with the given context. A named function
    # instead of a lambda bound to a name (PEP 8), and its parameter no
    # longer shadows the builtin ``type``.
    def render(ext):
        return loader.get_template("%s.%s" % (template, ext)).render(Context(context))

    # Create and send email.
    msg = EmailMultiAlternatives(subject, render("txt"), addr_from, addr_to)
    msg.attach_alternative(render("html"), "text/html")
    for attachment in attachments:
        msg.attach(*attachment)
    msg.send(fail_silently=fail_silently)
def send_verification_mail(request, user, verifcation_type):
    """
    Email the user a signed verification link.

    Used when ``ACCOUNTS_VERIFICATION_REQUIRED`` is ``True`` and a user
    signs up, and when a user retrieves a lost password.
    ``verifcation_type`` is simultaneously the urlpattern name for the
    verification link and the basename of the email templates used.
    """
    url_kwargs = {
        "uidb36": int_to_base36(user.id),
        "token": default_token_generator.make_token(user),
    }
    # Signed link back into the site, carrying the post-verification redirect.
    verify_url = reverse(verifcation_type, kwargs=url_kwargs)
    verify_url += "?next=" + request.GET.get("next", "/")
    context = {
        "request": request,
        "user": user,
        "verify_url": verify_url,
    }
    subject_template = "email/%s_subject.txt" % verifcation_type
    subject = loader.get_template(subject_template).render(Context(context))
    # Collapse any newlines the subject template rendered, then send both
    # the text and HTML bodies via send_mail_template.
    send_mail_template("".join(subject.splitlines()),
                       "email/%s" % verifcation_type,
                       settings.DEFAULT_FROM_EMAIL, user.email,
                       context=context, fail_silently=settings.DEBUG)
| bsd-3-clause |
gabrielecker/DogAdoption-Backend | views/dogs.py | 1 | 1667 | # encoding: utf-8
from flask import jsonify, request
from flask_login import login_required, current_user
from project.app import db
from project.models import Dog
from utils.rest import RestView
class DogAPI(RestView):
    """REST resource exposing CRUD operations on ``Dog`` records."""
    # Marshmallow-style schema name used by RestView's parsers.
    schema = 'Dog'
    def get(self, id):
        """Return one dog by ``id``, or a page of 6 dogs when ``id`` is None."""
        page = request.args.get('page') or 1
        if id is None:
            dogs = Dog.query.paginate(page=int(page), per_page=6)
            return jsonify({
                'items': self.list_parser.dump(dogs.items).data,
                'page': dogs.page,
                'total': dogs.total
            })
        else:
            dog = Dog.query.get(id)
            return jsonify(self.parser.dump(dog).data)
    @login_required
    def post(self):
        """Create a dog owned by the current user from the JSON payload."""
        data = request.get_json()
        dog = Dog(**data)
        dog.user = current_user
        db.session.add(dog)
        db.session.commit()
        return self.make_response('Dog created successfully.', 201)
    @login_required
    def put(self, id):
        """Update the dog identified by ``id`` with the JSON payload."""
        # NOTE(review): no existence check here — updating a missing id
        # still reports success, unlike delete() below; confirm intended.
        data = request.get_json()
        Dog.query.filter_by(id=id).update(data)
        db.session.commit()
        return self.make_response('Dog updated successfully.')
    @login_required
    def delete(self, id):
        """Delete the dog identified by ``id``; 404 when it does not exist."""
        dog = Dog.query.get(id)
        if dog:
            db.session.delete(dog)
            db.session.commit()
            return self.make_response('Dog deleted successfully.')
        else:
            return self.make_response('Dog not found.', 404)
class UserDogsAPI(RestView):
    """Read-only listing of the authenticated user's own dogs."""
    schema = 'Dog'

    @login_required
    def get(self, id):
        """Return every dog belonging to the current user (``id`` unused)."""
        owned = Dog.query.filter_by(user=current_user)
        payload = self.list_parser.dump(owned).data
        return jsonify(payload)
| mit |
mrcslws/htmresearch | projects/feedback/feedback_sequences_additional.py | 7 | 24229 |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file runs a number of experiments testing the effectiveness of feedback
with noisy inputs.
"""
import os
from copy import deepcopy
import numpy
import cPickle
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats
matplotlib.rcParams['pdf.fonttype'] = 42
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
import feedback_experiment
from feedback_experiment import FeedbackExperiment
def convertSequenceMachineSequence(generatedSequences):
  """
  Convert a sequence from the SequenceMachine into a list of sequences,
  such that each sequence is a list of SDRs (sets).

  ``None`` entries act as terminators between consecutive sequences, so a
  trailing ``None`` is required for the final sequence to be emitted.
  """
  allSequences = []
  current = []
  for item in generatedSequences:
    if item is None:
      # Terminator: close off the current sequence and start a new one.
      allSequences.append(current)
      current = []
    else:
      current.append(item)
  return allSequences
def generateSequences(n=2048, w=40, sequenceLength=5, sequenceCount=2,
                      sharedRange=None, seed=42):
  """
  Generate high order sequences using SequenceMachine.

  Returns a (sequenceMachine, generatedSequences, numbers) tuple.
  """
  # Use a large pattern alphabet so there is plenty of room for noise SDRs.
  patternAlphabetSize = 10 * (sequenceLength * sequenceCount)
  patternMachine = PatternMachine(n, w, patternAlphabetSize, seed)
  sequenceMachine = SequenceMachine(patternMachine, seed)
  numbers = sequenceMachine.generateNumbers(sequenceCount, sequenceLength,
                                            sharedRange=sharedRange)
  generated = sequenceMachine.generateFromNumbers(numbers)
  return sequenceMachine, generated, numbers
def sparsenRange(sequenceMachine, sequences, startRange, endRange, probaZero):
  """
  Randomly drop bits from the SDRs whose position within their sequence
  falls in [startRange, endRange).

  Each bit of an affected SDR is independently kept with probability
  1 - probaZero. Positions outside the range are passed through untouched.
  ``sequenceMachine`` is unused; it is kept only for signature
  compatibility with the other noise helpers in this file.
  """
  # (Removed the unused ``patternMachine = sequenceMachine.patternMachine``
  # local and the unused enumerate index from the original.)
  newSequences = []
  for s in sequences:
    newSequence = []
    for p, sdr in enumerate(s):
      if startRange <= p < endRange:
        bits = numpy.array(list(sdr))
        keep = numpy.random.rand(len(bits)) > probaZero
        newSequence.append(set(bits[keep]))
      else:
        newSequence.append(sdr)
    newSequences.append(newSequence)
  return newSequences
def crossSequences(sequenceMachine, sequences, pos):
  """
  Cross over the sequences: from position ``pos`` onward, sequence i
  continues with the corresponding elements of sequence
  (i + 1) % len(sequences). With a single sequence this is a no-op.

  ``sequenceMachine`` is unused; it is kept only for signature
  compatibility with the other noise helpers in this file.
  """
  # (Removed the unused ``patternMachine = sequenceMachine.patternMachine``
  # local from the original.)
  count = len(sequences)
  newSequences = []
  for numseq, s in enumerate(sequences):
    donor = sequences[(numseq + 1) % count]
    newSequence = [donor[p] if p >= pos else sdr
                   for p, sdr in enumerate(s)]
    newSequences.append(newSequence)
  return newSequences
def addTemporalNoise(sequenceMachine, sequences, noiseStart, noiseEnd, noiseProba):
  """
  Apply bit noise (via the pattern machine's ``addNoise``) to every SDR
  whose position within its sequence lies in [noiseStart, noiseEnd);
  all other positions are passed through untouched.
  """
  patternMachine = sequenceMachine.patternMachine
  noisySequences = []
  for seq in sequences:
    noisySeq = []
    for pos, sdr in enumerate(seq):
      if noiseStart <= pos < noiseEnd:
        noisySeq.append(patternMachine.addNoise(sdr, noiseProba))
      else:
        noisySeq.append(sdr)
    noisySequences.append(noisySeq)
  return noisySequences
def addPerturbation(sequenceMachine, sequences, noiseType, pos, number=1):
  """
  Perturb ``number`` elements starting at position ``pos`` in every
  sequence.

  noiseType:
    - "skip":    drop the element(s) entirely (shortens the sequence)
    - "replace": substitute a fully random SDR (addNoise with proba 1.0)
    - "repeat":  repeat the previous element

  Raises ValueError for any other noiseType.
  """
  patternMachine = sequenceMachine.patternMachine
  newSequences = []
  for (numseq, s) in enumerate(sequences):
    newSequence = []
    for p, sdr in enumerate(s):
      if p >= pos and p < pos + number:
        if noiseType == "skip":
          # Drop this element entirely.
          pass
        elif noiseType == "replace":
          newsdr = patternMachine.addNoise(sdr, 1.0)
          newSequence.append(newsdr)
        elif noiseType == "repeat":
          newSequence.append(s[p - 1])
        else:
          # BUG FIX: the original did ``raise("Unrecognized Noise Type!")``,
          # which raises a string (a TypeError at runtime, not a proper
          # exception). Raise a real exception instead.
          raise ValueError("Unrecognized Noise Type!")
      else:
        newSequence.append(sdr)
    newSequences.append(newSequence)
  return newSequences
def runInference(exp, sequences, enableFeedback=True, apicalTiebreak=True,
                 apicalModulationBasalThreshold=True, inertia=True):
  """
  Run inference on this set of sequences and compute error.

  Returns a (error, activityTraces, responses) tuple, where ``error`` is
  the per-sequence average active-cell count reported by ``exp.infer``,
  averaged over all sequences; ``activityTraces`` and ``responses`` collect
  the per-sequence traces/response dicts from ``exp.infer`` in order.
  The keyword flags are forwarded to ``exp.infer`` to selectively disable
  feedback mechanisms.
  """
  if enableFeedback:
    print "Feedback enabled: "
  else:
    print "Feedback disabled: "
  error = 0
  activityTraces = []
  responses = []
  for i, sequence in enumerate(sequences):
    (avgActiveCells, avgPredictedActiveCells, activityTrace, responsesThisSeq) = exp.infer(
      sequence, sequenceNumber=i, enableFeedback=enableFeedback, apicalTiebreak=apicalTiebreak,
      apicalModulationBasalThreshold=apicalModulationBasalThreshold, inertia=inertia)
    error += avgActiveCells
    activityTraces.append(activityTrace)
    responses.append(responsesThisSeq)
    print " "
  # Average the accumulated per-sequence error.
  error /= len(sequences)
  print "Average error = ", error
  return error, activityTraces, responses
def runExp(noiseProba, numSequences, nbSeeds, noiseType, sequenceLen, sharedRange, noiseRange, whichPlot, plotTitle):
  """
  Run the full noisy-inference experiment over ``nbSeeds`` random seeds:
  generate sequences, apply noise, train a FeedbackExperiment network, run
  inference with feedback fully enabled / disabled / with individual
  feedback mechanisms ablated, accumulate prediction-correlation statistics
  and produce a bar plot saved as ``plotTitle``.png.

  Returns (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4,
  corrsPredCorrectNoATL4, corrsPredCorrectNoAML4, corrsPredCorrectNoINL4):
  per-sequence lists of correlations between predicted and correct L4
  activity, accumulated across all seeds and sequences.
  """
  allowedNoises = ("skip", "replace", "repeat", "crossover", "pollute")
  if noiseType not in allowedNoises:
    # NOTE(review): the message is mangled -- ``str.join`` uses the message
    # as the separator between the noise names; presumably the intent was
    # "noiseType must be one of the following: " + ", ".join(allowedNoises).
    raise(RuntimeError("noiseType must be one of the following: ".join(allowedNoises)))
  # Accumulators for statistics across seeds/sequences. Many of these are
  # never populated in the current code and are kept for compatibility.
  meanErrsFB = []; meanErrsNoFB = []; meanErrsNoNoise = []
  stdErrsFB = []; stdErrsNoFB = []; stdErrsNoNoise = []
  meanPerfsFB = []; stdPerfsFB = []
  meanPerfsNoFB = []; stdPerfsNoFB = []
  stdsFB = []
  stdsNoFB=[]
  activitiesFB=[]; activitiesNoFB=[]
  diffsFB = []
  diffsNoFB = []
  overlapsFBL2=[]; overlapsNoFBL2=[]
  overlapsFBL2Next=[]; overlapsNoFBL2Next=[]
  overlapsFBL4=[]; overlapsNoFBL4=[]
  overlapsFBL4Next=[]; overlapsNoFBL4Next=[]
  corrsPredCorrectFBL4=[]; corrsPredCorrectNoFBL4=[]
  diffsFBL4Pred=[]; diffsNoFBL4Pred=[]
  diffsFBL4PredNext=[]; diffsNoFBL4PredNext=[]
  diffsFBL2=[]; diffsNoFBL2=[]
  diffsFBL2Next=[]; diffsNoFBL2Next=[]
  diffsNoAT = []; overlapsNoATL2=[]; overlapsNoATL2Next=[]; overlapsNoATL4=[]
  overlapsNoATL4Next=[]
  corrsPredCorrectNoATL4=[]; diffsNoATL4Pred=[]; diffsNoATL4PredNext=[]
  diffsNoATL2=[]; diffsNoATL2Next=[]
  diffsNoAM = []; overlapsNoAML2=[]; overlapsNoAML2Next=[]; overlapsNoAML4=[]
  overlapsNoAML4Next=[]
  corrsPredCorrectNoAML4=[]; diffsNoAML4Pred=[]; diffsNoAML4PredNext=[]
  diffsNoAML2=[]; diffsNoAML2Next=[]
  diffsNoIN = []; overlapsNoINL2=[]; overlapsNoINL2Next=[]; overlapsNoINL4=[]
  overlapsNoINL4Next=[]
  corrsPredCorrectNoINL4=[]; diffsNoINL4Pred=[]; diffsNoINL4PredNext=[]
  diffsNoINL2=[]; diffsNoINL2Next=[]
  errorsFB=[]; errorsNoFB=[]; errorsNoNoise=[]
  perfsFB = []; perfsNoFB = []
  #for probaZero in probaZeros:
  seed = 42
  for seedx in range(nbSeeds):
    # Deterministic but distinct seed per run.
    seed = seedx + 123
    # NOTE(review): trailing comma makes this a 1-tuple; ``profile`` is
    # unused anyway.
    profile = False,
    L4Overrides = {"cellsPerColumn": 8}
    numpy.random.seed(seed)
    # Create the sequences and arrays
    print "Generating sequences..."
    sequenceMachine, generatedSequences, numbers = generateSequences(
      sequenceLength=sequenceLen, sequenceCount=numSequences,
      sharedRange=sharedRange,
      seed=seed)
    sequences = convertSequenceMachineSequence(generatedSequences)
    noisySequences = deepcopy(sequences)
    # Apply noise to sequences
    noisySequences = addTemporalNoise(sequenceMachine, noisySequences,
                                      noiseStart=noiseRange[0], noiseEnd=noiseRange[1],
                                      noiseProba=noiseProba)
    # *In addition* to this, add crossover or single-point noise
    if noiseType == "crossover":
      noisySequences = crossSequences(sequenceMachine, noisySequences,
                                      pos=sequenceLen/2)
    elif noiseType in ("repeat", "replace", "skip"):
      noisySequences = addPerturbation(sequenceMachine, noisySequences,
                                       noiseType=noiseType, pos=sequenceLen/2, number=1)
    inferenceErrors = []
    #Setup experiment and train the network on sequences
    print "Learning sequences..."
    exp = FeedbackExperiment(
      numLearningPasses= 2*sequenceLen, # To handle high order sequences
      seed=seed,
      L4Overrides=L4Overrides,
    )
    exp.learnSequences(sequences)
    print "Number of columns in exp: ", exp.numColumns
    print "Sequences learned!"
    # Run inference without any noise. This becomes our baseline error
    standardError, activityNoNoise, responsesNoNoise = runInference(exp, sequences)
    inferenceErrors.append(standardError)
    # Noisy runs: full feedback, no feedback, and each mechanism ablated.
    runError, activityFB, responsesFB = runInference(
      exp, noisySequences, enableFeedback=True)
    runError, activityNoFB, responsesNoFB = runInference(
      exp, noisySequences, enableFeedback=False)
    runError, activityNoAT, responsesNoAT = runInference(
      exp, noisySequences, enableFeedback=True, apicalTiebreak=False)
    # NOTE(review): this run overwrites ``activityNoAT`` instead of binding
    # an ``activityNoAM``; only ``responsesNoAM`` is used below, so results
    # are unaffected, but the binding looks like a copy/paste slip.
    runError, activityNoAT, responsesNoAM = runInference(
      exp, noisySequences, enableFeedback=True, apicalModulationBasalThreshold=False)
    runError, activityNoIN, responsesNoIN = runInference(
      exp, noisySequences, enableFeedback=True, inertia=False)
    # Now that actual processing is done, we compute various statistics and plot graphs.
    seqlen = len(noisySequences[0])
    sdrlen = 2048 * 8 # Should be the total number of cells in L4. Need to make this more parametrized!
    for numseq in range(len(responsesNoNoise)):
      # Per-timestep symmetric differences / overlaps between the noise-free
      # responses and each condition, for L4 and L2, plus overlap with the
      # *next* sequence's noise-free responses.
      diffsFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      diffsNoFB.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoFBL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoFBL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoFB[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      diffsFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
      diffsNoFBL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoFB[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
      diffsNoAT.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoATL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoATL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAT[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoATL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoATL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAT[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      diffsNoATL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAT[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
      diffsNoAM.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoAML2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoAML2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoAM[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoAML4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoAML4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoAM[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      diffsNoAML4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoAM[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
      diffsNoIN.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoINL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoINL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].intersection(responsesNoIN[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      overlapsNoINL4.append( [len(responsesNoNoise[numseq]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      overlapsNoINL4Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L4Responses'][x].intersection(responsesNoIN[numseq]['L4Responses'][x])) for x in range(seqlen)] )
      diffsNoINL4Pred.append( [len(responsesNoNoise[numseq]['L4Responses'][x].symmetric_difference(responsesNoIN[numseq]['L4Predicted'][x])) for x in range(seqlen)] )
      # Correlation between each condition's L4 prediction and the noise-free
      # L4 response, per timestep, computed on binarized vectors. The extra
      # always-1 element at index -1 prevents a zero-variance vector.
      cpcfb = []; cpcnofb=[]; cpcnoat=[]; cpcnoam=[]; cpcnoin=[];
      for x in range(seqlen):
        z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
        z2 = numpy.zeros(sdrlen+1); z2[list(responsesFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
        cpcfb.append(numpy.corrcoef(z1, z2)[0,1])
        z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
        z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoFB[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
        cpcnofb.append(numpy.corrcoef(z1, z2)[0,1])
        z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
        z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAT[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
        cpcnoat.append(numpy.corrcoef(z1, z2)[0,1])
        z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
        z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoAM[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
        cpcnoam.append(numpy.corrcoef(z1, z2)[0,1])
        z1 = numpy.zeros(sdrlen+1); z1[list(responsesNoNoise[numseq]['L4Responses'][x])] = 1; z1[-1] = 1
        z2 = numpy.zeros(sdrlen+1); z2[list(responsesNoIN[numseq]['L4Predicted'][x])] = 1; z2[-1] = 1
        cpcnoin.append(numpy.corrcoef(z1, z2)[0,1])
      # Note that the correlations are appended across all seeds and sequences
      # (the first timestep is dropped: nothing is predicted at t=0).
      corrsPredCorrectNoFBL4.append(cpcnofb[1:])
      corrsPredCorrectNoATL4.append(cpcnoat[1:])
      corrsPredCorrectNoINL4.append(cpcnoin[1:])
      corrsPredCorrectNoAML4.append(cpcnoam[1:])
      corrsPredCorrectFBL4.append(cpcfb[1:])
      # diffsFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      # diffsNoFBL2.append( [len(responsesNoNoise[numseq]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      # diffsFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      # diffsNoFBL2Next.append( [len(responsesNoNoise[(numseq + 1) % numSequences]['L2Responses'][x].symmetric_difference(responsesNoFB[numseq]['L2Responses'][x])) for x in range(seqlen)] )
      print "Size of L2 responses (FB):", [len(responsesFB[numseq]['L2Responses'][x]) for x in range(seqlen)]
      print "Size of L2 responses (NoNoise):", [len(responsesNoNoise[numseq]['L2Responses'][x]) for x in range(seqlen)]
      print "Size of L4 responses (FB):", [len(responsesFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
      print "Size of L4 responses (NoFB):", [len(responsesNoFB[numseq]['L4Responses'][x]) for x in range(seqlen)]
      print "Size of L4 responses (NoAT):", [len(responsesNoAT[numseq]['L4Responses'][x]) for x in range(seqlen)]
      print "Size of L4 responses (NoAM):", [len(responsesNoAM[numseq]['L4Responses'][x]) for x in range(seqlen)]
      print "Size of L4 responses (NoIN):", [len(responsesNoIN[numseq]['L4Responses'][x]) for x in range(seqlen)]
      print "Size of L4 responses (NoNoise):", [len(responsesNoNoise[numseq]['L4Responses'][x]) for x in range(seqlen)]
      print "Size of L4 predictions (FB):", [len(responsesFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
      print "Size of L4 predictions (NoFB):", [len(responsesNoFB[numseq]['L4Predicted'][x]) for x in range(seqlen)]
      print "Size of L4 predictions (NoAT):", [len(responsesNoAT[numseq]['L4Predicted'][x]) for x in range(seqlen)]
      print "Size of L4 predictions (NoAM):", [len(responsesNoAM[numseq]['L4Predicted'][x]) for x in range(seqlen)]
      print "Size of L4 predictions (NoIN):", [len(responsesNoIN[numseq]['L4Predicted'][x]) for x in range(seqlen)]
      print "Size of L4 predictions (NoNoise):", [len(responsesNoNoise[numseq]['L4Predicted'][x]) for x in range(seqlen)]
      print "L2 overlap with current (FB): ", overlapsFBL2[-1]
      print "L4 overlap with current (FB): ", overlapsFBL4[-1]
      print "L4 overlap with current (NoFB): ", overlapsNoFBL4[-1]
      print "L4 correlation pred/correct (FB): ", corrsPredCorrectFBL4[-1]
      print "L4 correlation pred/correct (NoFB): ", corrsPredCorrectNoFBL4[-1]
      print "L4 correlation pred/correct (NoAT): ", corrsPredCorrectNoATL4[-1]
      # NOTE(review): the next two lines print the NoAT list again instead of
      # corrsPredCorrectNoAML4 / corrsPredCorrectNoINL4 -- looks like a
      # copy/paste slip (display only; the plot below uses the right data).
      print "L4 correlation pred/correct (NoAM): ", corrsPredCorrectNoATL4[-1]
      print "L4 correlation pred/correct (NoIN): ", corrsPredCorrectNoATL4[-1]
      print "NoNoise sequence:", [list(x)[:2] for x in sequences[numseq]]
      print "Noise sequence:", [list(x)[:2] for x in noisySequences[numseq]]
      print "NoNoise L4 responses:", [list(x)[:2] for x in responsesNoNoise[numseq]['L4Responses']]
      print "NoFB L4 responses:", [list(x)[:2] for x in responsesNoFB[numseq]['L4Responses']]
      print ""
  # Bar plot of mean prediction performance per condition, with rank-sum
  # significance brackets against the full-feedback condition.
  plt.figure()
  allDataSets = (corrsPredCorrectFBL4, corrsPredCorrectNoFBL4, corrsPredCorrectNoATL4,
                 corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
  allmeans = [numpy.mean(x) for x in allDataSets]
  allstds = [numpy.std(x) for x in allDataSets]
  nbbars = len(allmeans)
  plt.bar(2*(1+numpy.arange(nbbars))-.5, allmeans, 1.0, color='r', edgecolor='none', yerr=allstds, capsize=5, ecolor='k')
  for nn in range(1, nbbars):
    plt.vlines([2, 2 +2*nn], 1.2, 1.2+(nn/10.0), lw=2); plt.hlines(1.2+(nn/10.0), 2, 2+2*nn, lw=2)
    pval = scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(allDataSets[nn]).ravel())[1]
    if pval > 0.05:
      pvallabel = ' o' #r'$o$'
    elif pval > 0.01:
      pvallabel = '*'
    elif pval > 0.001:
      pvallabel = '**'
    else:
      pvallabel = '***'
    plt.text(3, 1.2+(nn/10.0)+.02, pvallabel, fontdict={"size":14})
  plt.xticks(2*(1+numpy.arange(nbbars)), ('Full', 'No\nFB', 'No Earlier\nFiring', 'No Thresold\nModulation', 'No Slower\nDynamics'))
  plt.ylabel("Avg. Prediction Performance");
  plt.title(plotTitle)
  plt.savefig(plotTitle+".png")
  # scipy.stats.ranksums(numpy.array(corrsPredCorrectFBL4).ravel(), numpy.array(corrsPredCorrectNoATL4).ravel())
  plt.show()
  return (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4, corrsPredCorrectNoAML4, corrsPredCorrectNoINL4)
if __name__ == "__main__":
  plt.ion()
  # Continuous ("pollute") noise, with and without a shared subsequence.
  (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
      corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
      numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, shared range")
  (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
      corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.3,
      numSequences=5, nbSeeds=10, noiseType="pollute", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Continuous noise, no shared range")
  # Single random-stimulus insertion, with and without a shared subsequence.
  (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
      corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
      numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, shared range")
  (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
      corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.02,
      numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Insert random stimulus, no shared range")
  # (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
  #     corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
  #     numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(5,24), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, shared range")
  #
  # (corrsPredCorrectNoFBL4, corrsPredCorrectFBL4, corrsPredCorrectNoATL4,
  #     corrsPredCorrectNoAML4, corrsPredCorrectNoINL4) = runExp(noiseProba=.25,
  #     numSequences=5, nbSeeds=10, noiseType="replace", sequenceLen=30, sharedRange=(0,0), noiseRange=(0,30), whichPlot="corrspredcorrect", plotTitle="Individual effects: Random insert + continuous noise, no shared range")
| agpl-3.0 |
saguziel/incubator-airflow | airflow/hooks/zendesk_hook.py | 6 | 3835 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A hook to talk to Zendesk
"""
import logging
import time
from zdesk import Zendesk, RateLimitError, ZendeskError
from airflow.hooks import BaseHook
class ZendeskHook(BaseHook):
    """
    Talk to the Zendesk API via the ``zdesk`` client, transparently
    sleeping and retrying when the API rate limit is hit.
    """

    def __init__(self, zendesk_conn_id):
        # Airflow connection id, resolved via BaseHook.get_connection().
        self.__zendesk_conn_id = zendesk_conn_id
        # Base URL ("https://" + host); set lazily by get_conn().
        self.__url = None

    def get_conn(self):
        """Build a ``Zendesk`` client from the stored Airflow connection."""
        conn = self.get_connection(self.__zendesk_conn_id)
        self.__url = "https://" + conn.host
        return Zendesk(self.__url, conn.login, conn.password, True)

    def __handle_rate_limit_exception(self, rate_limit_exception):
        """
        Sleep for the time specified in the exception. If not specified, wait
        for 60 seconds.
        """
        retry_after = int(
            rate_limit_exception.response.headers.get('Retry-After', 60))
        logging.info(
            "Hit Zendesk API rate limit. Pausing for {} "
            "seconds".format(
                retry_after))
        time.sleep(retry_after)

    def call(self, path, query=None, get_all_pages=True):
        """
        Call Zendesk API and return results.

        :param path: The Zendesk API to call
        :param query: Query parameters
        :param get_all_pages: Accumulate results over all pages before
            returning. Due to strict rate limiting, this can often timeout.
            Waits for recommended period between tries after a timeout.
        """
        zendesk = self.get_conn()
        # Retry the first request until it gets past the rate limiter.
        first_request_successful = False
        while not first_request_successful:
            try:
                results = zendesk.call(path, query)
                first_request_successful = True
            except RateLimitError as rle:
                self.__handle_rate_limit_exception(rle)
        # Find the key with the results,
        # e.g. "/api/v2/tickets.json" -> "tickets".
        key = path.split("/")[-1].split(".json")[0]
        next_page = results['next_page']
        results = results[key]
        if get_all_pages:
            while next_page is not None:
                try:
                    # Need to split because the next page URL has
                    # `github.zendesk...`
                    # in it, but the call function needs it removed.
                    next_url = next_page.split(self.__url)[1]
                    logging.info("Calling {}".format(next_url))
                    more_res = zendesk.call(next_url)
                    results.extend(more_res[key])
                    if next_page == more_res['next_page']:
                        # Unfortunately zdesk doesn't always throw ZendeskError
                        # when we are done getting all the data. Sometimes the
                        # next just refers to the current set of results. Hence,
                        # need to deal with this special case
                        break
                    else:
                        next_page = more_res['next_page']
                except RateLimitError as rle:
                    self.__handle_rate_limit_exception(rle)
                except ZendeskError as ze:
                    # NOTE(review): assumes ``ze.msg`` is bytes here -- confirm
                    # against the installed zdesk version.
                    if b"Use a start_time older than 5 minutes" in ze.msg:
                        # We have pretty up to date data
                        break
                    else:
                        raise ze
        return results
| apache-2.0 |
akipta/autokey | src/lib/interface.py | 46 | 44692 | # -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["XRecordInterface", "AtSpiInterface"]
import os, threading, re, time, socket, select, logging, Queue, subprocess
try:
import pyatspi
HAS_ATSPI = True
except ImportError:
HAS_ATSPI = False
from Xlib import X, XK, display, error
try:
from Xlib.ext import record, xtest
HAS_RECORD = True
except ImportError:
HAS_RECORD = False
from Xlib.protocol import rq, event
import common
if common.USING_QT:
from PyQt4.QtGui import QClipboard, QApplication
else:
from gi.repository import Gtk, Gdk
logger = logging.getLogger("interface")
# (modifier-mapping index, X modifier mask) pairs, used to translate the X
# server's modifier mapping into the per-modifier masks built in
# __initMappings().
MASK_INDEXES = [
               (X.ShiftMapIndex, X.ShiftMask),
               (X.ControlMapIndex, X.ControlMask),
               (X.LockMapIndex, X.LockMask),
               (X.Mod1MapIndex, X.Mod1Mask),
               (X.Mod2MapIndex, X.Mod2Mask),
               (X.Mod3MapIndex, X.Mod3Mask),
               (X.Mod4MapIndex, X.Mod4Mask),
               (X.Mod5MapIndex, X.Mod5Mask),
               ]

# Bits of the keyboard LED mask reported by X, used to read the initial
# Caps Lock / Num Lock state.
CAPSLOCK_LEDMASK = 1<<0
NUMLOCK_LEDMASK = 1<<1
class XInterfaceBase(threading.Thread):
"""
Encapsulates the common functionality for the two X interface classes.
"""
    def __init__(self, mediator, app):
        """
        Set up the interface thread: worker threads, clipboard handles,
        X mappings, initial lock-key state and keymap-change monitoring.
        """
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.setName("XInterface-thread")
        self.mediator = mediator
        self.app = app
        self.lastChars = [] # QT4 Workaround
        self.__enableQT4Workaround = False # QT4 Workaround
        self.shutdown = False

        # Event loop: executes queued (method, args) work items serially.
        self.eventThread = threading.Thread(target=self.__eventLoop)
        self.queue = Queue.Queue()

        # Event listener (body defined elsewhere in this class).
        self.listenerThread = threading.Thread(target=self.__flushEvents)

        # Toolkit-specific clipboard handles.
        if common.USING_QT:
            self.clipBoard = QApplication.clipboard()
        else:
            self.clipBoard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
            self.selection = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)

        self.__initMappings()

        # Set initial lock state from the keyboard LED mask.
        ledMask = self.localDisplay.get_keyboard_control().led_mask
        mediator.set_modifier_state(Key.CAPSLOCK, (ledMask & CAPSLOCK_LEDMASK) != 0)
        mediator.set_modifier_state(Key.NUMLOCK, (ledMask & NUMLOCK_LEDMASK) != 0)

        # Window name atoms, used when reading window titles.
        self.__NameAtom = self.localDisplay.intern_atom("_NET_WM_NAME", True)
        self.__VisibleNameAtom = self.localDisplay.intern_atom("_NET_WM_VISIBLE_NAME", True)

        # Under GTK, watch for keymap changes so mappings can be rebuilt.
        if not common.USING_QT:
            self.keyMap = Gdk.Keymap.get_default()
            self.keyMap.connect("keys-changed", self.on_keys_changed)

        self.__ignoreRemap = False

        self.eventThread.start()
        self.listenerThread.start()
    def __eventLoop(self):
        """
        Worker loop: pop (method, args) pairs off the queue and execute
        them, until a (None, None) sentinel is received. Exceptions are
        logged and swallowed so one failed work item cannot kill the loop.
        """
        while True:
            method, args = self.queue.get()

            if method is None and args is None:
                break

            try:
                method(*args)
            except Exception, e:
                logger.exception("Error in X event loop thread")

            self.queue.task_done()
def __enqueue(self, method, *args):
self.queue.put_nowait((method, args))
    def on_keys_changed(self, data=None):
        """
        GDK "keys-changed" handler: ungrab all hotkeys and rebuild the key
        mappings on the event thread. ``__ignoreRemap`` suppresses the
        duplicate change events this rebuilding itself provokes.
        """
        if not self.__ignoreRemap:
            logger.debug("Recorded keymap change event")
            self.__ignoreRemap = True
            # Let the burst of change events settle before rebuilding.
            time.sleep(0.2)
            self.__enqueue(self.__ungrabAllHotkeys)
            self.__enqueue(self.__delayedInitMappings)
        else:
            logger.debug("Ignored keymap change event")
    def __delayedInitMappings(self):
        """Rebuild key mappings, then re-enable keymap-change handling."""
        self.__initMappings()
        self.__ignoreRemap = False
    def __initMappings(self):
        """
        Open the X display and (re)build everything derived from the
        current keymap: usable keysym offsets, modifier masks, hotkey
        grabs, and the list of unused keycodes available for remapping.
        """
        self.localDisplay = display.Display()
        self.rootWindow = self.localDisplay.screen().root
        self.rootWindow.change_attributes(event_mask=X.SubstructureNotifyMask|X.StructureNotifyMask)
        altList = self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)
        self.__usableOffsets = (0, 1)
        for code, offset in altList:
            # keycode 108 at offset 0 -- presumably the AltGr key; verify
            # against the target keyboard layouts.
            if code == 108 and offset == 0:
                self.__usableOffsets += (4, 5)
                logger.debug("Enabling sending using Alt-Grid")
                break

        # Build modifier mask mapping
        self.modMasks = {}
        mapping = self.localDisplay.get_modifier_mapping()

        for keySym, ak in XK_TO_AK_MAP.iteritems():
            if ak in MODIFIERS:
                keyCodeList = self.localDisplay.keysym_to_keycodes(keySym)
                found = False

                for keyCode, lvl in keyCodeList:
                    for index, mask in MASK_INDEXES:
                        if keyCode in mapping[index]:
                            self.modMasks[ak] = mask
                            found = True
                            break

                    if found: break

        logger.debug("Modifier masks: %r", self.modMasks)

        self.__grabHotkeys()
        self.localDisplay.flush()

        # --- get list of keycodes that are unused in the current keyboard mapping
        keyCode = 8
        avail = []
        for keyCodeMapping in self.localDisplay.get_keyboard_mapping(keyCode, 200):
            codeAvail = True
            for offset in keyCodeMapping:
                if offset != 0:
                    codeAvail = False
                    break

            if codeAvail:
                avail.append(keyCode)

            keyCode += 1

        self.__availableKeycodes = avail
        self.remappedChars = {}

        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
            self.keymap_test()
    def keymap_test(self):
        """
        Log debugging information about the current X keymap: the keysym
        bound to keycode 108 (AltGr candidate) and the keycodes for a set
        of common printable characters.
        """
        code = self.localDisplay.keycode_to_keysym(108, 0)
        for attr in XK.__dict__.iteritems():
            if attr[0].startswith("XK"):
                if attr[1] == code:
                    logger.debug("Alt-Grid: %s, %s", attr[0], attr[1])
        logger.debug(repr(self.localDisplay.keysym_to_keycodes(XK.XK_ISO_Level3_Shift)))

        logger.debug("X Server Keymap")
        for char in "\\|`1234567890-=~!@#$%^&*()qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?":
            keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
            if len(keyCodeList) > 0:
                logger.debug("[%s] : %s", char, keyCodeList)
            else:
                logger.debug("No mapping for [%s]", char)
def __needsMutterWorkaround(self, item):
if Key.SUPER not in item.modifiers:
return False
try:
output = subprocess.check_output(["ps", "-eo", "command"])
lines = output.splitlines()
for line in lines:
if "gnome-shell" in line or "cinnamon" in line or "unity" in line:
return True
except:
pass # since this is just a nasty workaround, if anything goes wrong just disable it
return False
    def __grabHotkeys(self):
        """
        Run during startup to grab global and specific hotkeys in all open windows
        """
        c = self.app.configManager
        hotkeys = c.hotKeys + c.hotKeyFolders
        # Grab global hotkeys in root window
        for item in c.globalHotkeys:
            if item.enabled:
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
                # Mutter ignores root-window grabs; also grab recursively.
                if self.__needsMutterWorkaround(item):
                    self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
        # Grab hotkeys without a filter in root window
        for item in hotkeys:
            if item.get_applicable_regex() is None:
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
        # Filtered hotkeys must be matched window-by-window.
        self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys)
    def __recurseTree(self, parent, hotkeys):
        # Grab matching hotkeys in all open child windows
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            try:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                if title or klass:
                    for item in hotkeys:
                        # Only filtered hotkeys whose regex matches this window.
                        if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
                            self.__grabHotkey(item.hotKey, item.modifiers, window)
                            self.__grabRecurse(item, window, False)
                # Recurse via the queue to keep the worker loop responsive.
                self.__enqueue(self.__recurseTree, window, hotkeys)
            except:
                logger.exception("grab on window failed")
    def __ungrabAllHotkeys(self):
        """
        Ungrab all hotkeys in preparation for keymap change
        """
        c = self.app.configManager
        hotkeys = c.hotKeys + c.hotKeyFolders
        # Ungrab global hotkeys in root window, recursively
        for item in c.globalHotkeys:
            if item.enabled:
                self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__ungrabRecurse(item, self.rootWindow, False)
        # Ungrab hotkeys without a filter in root window, recursively
        for item in hotkeys:
            if item.get_applicable_regex() is None:
                self.__ungrabHotkey(item.hotKey, item.modifiers, self.rootWindow)
                if self.__needsMutterWorkaround(item):
                    self.__ungrabRecurse(item, self.rootWindow, False)
        self.__recurseTreeUngrab(self.rootWindow, hotkeys)
    def __recurseTreeUngrab(self, parent, hotkeys):
        # Ungrab matching hotkeys in all open child windows
        # (mirror image of __recurseTree)
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            try:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                if title or klass:
                    for item in hotkeys:
                        if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
                            self.__ungrabHotkey(item.hotKey, item.modifiers, window)
                            self.__ungrabRecurse(item, window, False)
                self.__enqueue(self.__recurseTreeUngrab, window, hotkeys)
            except:
                logger.exception("ungrab on window failed")
    def __grabHotkeysForWindow(self, window):
        """
        Grab all hotkeys relevant to the window
        Used when a new window is created
        """
        c = self.app.configManager
        hotkeys = c.hotKeys + c.hotKeyFolders
        title = self.get_window_title(window)
        klass = self.get_window_class(window)
        for item in hotkeys:
            if item.get_applicable_regex() is not None and item._should_trigger_window_title((title, klass)):
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
            elif self.__needsMutterWorkaround(item):
                # Under Mutter, even non-matching hotkeys are grabbed in the
                # new window so root-window grabs keep working.
                self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, window)
    def __grabHotkey(self, key, modifiers, window):
        """
        Grab a specific hotkey in the given window
        """
        logger.debug("Grabbing hotkey: %r %r", modifiers, key)
        try:
            keycode = self.__lookupKeyCode(key)
            mask = 0
            for mod in modifiers:
                mask |= self.modMasks[mod]
            window.grab_key(keycode, mask, True, X.GrabModeAsync, X.GrabModeAsync)
            # Also grab with NumLock/CapsLock (and both) added, so the
            # hotkey fires regardless of lock-key state.
            if Key.NUMLOCK in self.modMasks:
                window.grab_key(keycode, mask|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
            if Key.CAPSLOCK in self.modMasks:
                window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
            if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
                window.grab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK], True, X.GrabModeAsync, X.GrabModeAsync)
        except Exception, e:
            logger.warn("Failed to grab hotkey %r %r: %s", modifiers, key, str(e))
    def grab_hotkey(self, item):
        """
        Grab a hotkey.
        If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
        If it has a filter regex, iterate over all children of the root and grab from matching windows
        """
        if item.get_applicable_regex() is None:
            # Global hotkey: a single root-window grab suffices...
            self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow)
            # ...except under Mutter, which needs per-window grabs too.
            if self.__needsMutterWorkaround(item):
                self.__enqueue(self.__grabRecurse, item, self.rootWindow, False)
        else:
            self.__enqueue(self.__grabRecurse, item, self.rootWindow)
    def __grabRecurse(self, item, parent, checkWinInfo=True):
        """Walk the window tree under parent, grabbing item's hotkey in every
        window that matches (or in every window once a match was found above,
        signalled by checkWinInfo=False)."""
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            shouldTrigger = False
            if checkWinInfo:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                shouldTrigger = item._should_trigger_window_title((title, klass))
            if shouldTrigger or not checkWinInfo:
                # Matched (or inherited a match): grab here and in all
                # descendants without further filtering.
                self.__grabHotkey(item.hotKey, item.modifiers, window)
                self.__grabRecurse(item, window, False)
            else:
                self.__grabRecurse(item, window)
    def ungrab_hotkey(self, item):
        """
        Ungrab a hotkey.
        If the hotkey has no filter regex, it is global and is grabbed recursively from the root window
        If it has a filter regex, iterate over all children of the root and ungrab from matching windows
        """
        import copy
        # Copy the item so later edits to it don't affect the queued ungrab.
        newItem = copy.copy(item)
        if item.get_applicable_regex() is None:
            self.__enqueue(self.__ungrabHotkey, newItem.hotKey, newItem.modifiers, self.rootWindow)
            if self.__needsMutterWorkaround(item):
                self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow, False)
        else:
            self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow)
    def __ungrabRecurse(self, item, parent, checkWinInfo=True):
        """Mirror image of __grabRecurse: releases the grabs it created."""
        try:
            children = parent.query_tree().children
        except:
            return # window has been destroyed
        for window in children:
            shouldTrigger = False
            if checkWinInfo:
                title = self.get_window_title(window, False)
                klass = self.get_window_class(window, False)
                shouldTrigger = item._should_trigger_window_title((title, klass))
            if shouldTrigger or not checkWinInfo:
                self.__ungrabHotkey(item.hotKey, item.modifiers, window)
                self.__ungrabRecurse(item, window, False)
            else:
                self.__ungrabRecurse(item, window)
    def __ungrabHotkey(self, key, modifiers, window):
        """
        Ungrab a specific hotkey in the given window
        """
        logger.debug("Ungrabbing hotkey: %r %r", modifiers, key)
        try:
            keycode = self.__lookupKeyCode(key)
            mask = 0
            for mod in modifiers:
                mask |= self.modMasks[mod]
            window.ungrab_key(keycode, mask)
            # Release the extra lock-key-state grabs made by __grabHotkey.
            if Key.NUMLOCK in self.modMasks:
                window.ungrab_key(keycode, mask|self.modMasks[Key.NUMLOCK])
            if Key.CAPSLOCK in self.modMasks:
                window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK])
            if Key.CAPSLOCK in self.modMasks and Key.NUMLOCK in self.modMasks:
                window.ungrab_key(keycode, mask|self.modMasks[Key.CAPSLOCK]|self.modMasks[Key.NUMLOCK])
        except Exception, e:
            logger.warn("Failed to ungrab hotkey %r %r: %s", modifiers, key, str(e))
def lookup_string(self, keyCode, shifted, numlock, altGrid):
if keyCode == 0:
return "<unknown>"
keySym = self.localDisplay.keycode_to_keysym(keyCode, 0)
if keySym in XK_TO_AK_NUMLOCKED and numlock and not (numlock and shifted):
return XK_TO_AK_NUMLOCKED[keySym]
elif keySym in XK_TO_AK_MAP:
return XK_TO_AK_MAP[keySym]
else:
try:
index = 0
if shifted: index += 1
if altGrid: index += 4
return unichr(self.localDisplay.keycode_to_keysym(keyCode, index))
except ValueError:
return "<code%d>" % keyCode
    def send_string_clipboard(self, string, pasteCommand):
        """Queue a string to be sent via the clipboard/selection mechanism."""
        self.__enqueue(self.__sendStringClipboard, string, pasteCommand)
    def __sendStringClipboard(self, string, pasteCommand):
        """Send a string by placing it on the selection (middle-click paste)
        when pasteCommand is None, otherwise via the clipboard plus the
        given paste key sequence."""
        logger.debug("Sending string: %r", string)
        if pasteCommand is None:
            if common.USING_QT:
                # Qt clipboard access must happen on the main thread; block
                # on a semaphore until the fill completes there.
                self.sem = threading.Semaphore(0)
                self.app.exec_in_main(self.__fillSelection, string)
                self.sem.acquire()
            else:
                self.__fillSelection(string)
            # Middle-click in the focused window pastes the selection.
            focus = self.localDisplay.get_input_focus().focus
            xtest.fake_input(focus, X.ButtonPress, X.Button2)
            xtest.fake_input(focus, X.ButtonRelease, X.Button2)
        else:
            if common.USING_QT:
                self.sem = threading.Semaphore(0)
                self.app.exec_in_main(self.__fillClipboard, string)
                self.sem.acquire()
            else:
                self.__fillClipboard(string)
            self.mediator.send_string(pasteCommand)
            if common.USING_QT:
                self.app.exec_in_main(self.__restoreClipboard)
        logger.debug("Send via clipboard done")
    def __restoreClipboard(self):
        # Restore the user's clipboard content saved by __fillClipboard.
        if self.__savedClipboard != "":
            if common.USING_QT:
                self.clipBoard.setText(self.__savedClipboard, QClipboard.Clipboard)
            else:
                Gdk.threads_enter()
                self.clipBoard.set_text(self.__savedClipboard)
                Gdk.threads_leave()
    def __fillSelection(self, string):
        # Place string on the X primary selection (middle-click buffer).
        if common.USING_QT:
            self.clipBoard.setText(string, QClipboard.Selection)
            self.sem.release()
        else:
            Gdk.threads_enter()
            self.selection.set_text(string.encode("utf-8"))
            Gdk.threads_leave()
    def __fillClipboard(self, string):
        # Save the current clipboard content, then replace it with string.
        if common.USING_QT:
            self.__savedClipboard = self.clipBoard.text()
            self.clipBoard.setText(string, QClipboard.Clipboard)
            self.sem.release()
        else:
            Gdk.threads_enter()
            text = self.clipBoard.wait_for_text()
            self.__savedClipboard = ''
            if text is not None: self.__savedClipboard = text
            self.clipBoard.set_text(string.encode("utf-8"))
            Gdk.threads_leave()
def begin_send(self):
self.__enqueue(self.__grab_keyboard)
def finish_send(self):
self.__enqueue(self.__ungrabKeyboard)
def grab_keyboard(self):
self.__enqueue(self.__grab_keyboard)
def __grab_keyboard(self):
focus = self.localDisplay.get_input_focus().focus
focus.grab_keyboard(True, X.GrabModeAsync, X.GrabModeAsync, X.CurrentTime)
self.localDisplay.flush()
def ungrab_keyboard(self):
self.__enqueue(self.__ungrabKeyboard)
def __ungrabKeyboard(self):
self.localDisplay.ungrab_keyboard(X.CurrentTime)
self.localDisplay.flush()
def __findUsableKeycode(self, codeList):
for code, offset in codeList:
if offset in self.__usableOffsets:
return code, offset
return None, None
    def send_string(self, string):
        """Queue a string of printable characters to be typed."""
        self.__enqueue(self.__sendString, string)
    def __sendString(self, string):
        """
        Send a string of printable characters.

        Characters with no usable keycode in the current keymap are first
        remapped onto spare keycodes (two characters per code: plain and
        shifted level), then each character is sent as a key event with the
        appropriate Shift/AltGr state.
        """
        logger.debug("Sending string: %r", string)
        # Determine if workaround is needed
        if not ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND]:
            self.__checkWorkaroundNeeded()
        # First find out if any chars need remapping
        remapNeeded = False
        for char in string:
            keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
            usableCode, offset = self.__findUsableKeycode(keyCodeList)
            if usableCode is None and char not in self.remappedChars:
                remapNeeded = True
                break
        # Now we know chars need remapping, do it
        if remapNeeded:
            self.__ignoreRemap = True
            self.remappedChars = {}
            remapChars = []
            for char in string:
                keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
                usableCode, offset = self.__findUsableKeycode(keyCodeList)
                if usableCode is None:
                    remapChars.append(char)
            logger.debug("Characters requiring remapping: %r", remapChars)
            availCodes = self.__availableKeycodes
            logger.debug("Remapping with keycodes in the range: %r", availCodes)
            mapping = self.localDisplay.get_keyboard_mapping(8, 200)
            firstCode = 8
            # NOTE(review): xrange(len(availCodes) - 1) never uses the last
            # available keycode -- looks like an off-by-one; confirm intent.
            for i in xrange(len(availCodes) - 1):
                code = availCodes[i]
                sym1 = 0
                sym2 = 0
                # Pack up to two pending chars onto this keycode:
                # level 0 (plain) and level 1 (shifted).
                if len(remapChars) > 0:
                    char = remapChars.pop(0)
                    self.remappedChars[char] = (code, 0)
                    sym1 = ord(char)
                if len(remapChars) > 0:
                    char = remapChars.pop(0)
                    self.remappedChars[char] = (code, 1)
                    sym2 = ord(char)
                if sym1 != 0:
                    mapping[code - firstCode][0] = sym1
                    mapping[code - firstCode][1] = sym2
            mapping = [tuple(l) for l in mapping]
            self.localDisplay.change_keyboard_mapping(firstCode, mapping)
            self.localDisplay.flush()
        focus = self.localDisplay.get_input_focus().focus
        for char in string:
            try:
                keyCodeList = self.localDisplay.keysym_to_keycodes(ord(char))
                keyCode, offset = self.__findUsableKeycode(keyCodeList)
                if keyCode is not None:
                    # Shift levels: 0 = plain, 1 = Shift, 4 = AltGr,
                    # 5 = Shift+AltGr.
                    if offset == 0:
                        self.__sendKeyCode(keyCode, theWindow=focus)
                    if offset == 1:
                        self.__pressKey(Key.SHIFT)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
                        self.__releaseKey(Key.SHIFT)
                    if offset == 4:
                        self.__pressKey(Key.ALT_GR)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR], focus)
                        self.__releaseKey(Key.ALT_GR)
                    if offset == 5:
                        self.__pressKey(Key.ALT_GR)
                        self.__pressKey(Key.SHIFT)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.ALT_GR]|self.modMasks[Key.SHIFT], focus)
                        self.__releaseKey(Key.SHIFT)
                        self.__releaseKey(Key.ALT_GR)
                elif char in self.remappedChars:
                    # Use the temporary remapping set up above.
                    keyCode, offset = self.remappedChars[char]
                    if offset == 0:
                        self.__sendKeyCode(keyCode, theWindow=focus)
                    if offset == 1:
                        self.__pressKey(Key.SHIFT)
                        self.__sendKeyCode(keyCode, self.modMasks[Key.SHIFT], focus)
                        self.__releaseKey(Key.SHIFT)
                else:
                    logger.warn("Unable to send character %r", char)
            except Exception, e:
                logger.exception("Error sending char %r: %s", char, str(e))
        self.__ignoreRemap = False
    def send_key(self, keyName):
        """
        Send a specific non-printing key, eg Up, Left, etc
        """
        self.__enqueue(self.__sendKey, keyName)
    def __sendKey(self, keyName):
        # Sent as a synthetic event to the focused window.
        logger.debug("Send special key: [%r]", keyName)
        self.__sendKeyCode(self.__lookupKeyCode(keyName))
    def fake_keypress(self, keyName):
        """Queue a press+release generated via XTEST (hardware-like input)."""
        self.__enqueue(self.__fakeKeypress, keyName)
    def __fakeKeypress(self, keyName):
        keyCode = self.__lookupKeyCode(keyName)
        xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
        xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
    def fake_keydown(self, keyName):
        """Queue an XTEST key press without the matching release."""
        self.__enqueue(self.__fakeKeydown, keyName)
    def __fakeKeydown(self, keyName):
        keyCode = self.__lookupKeyCode(keyName)
        xtest.fake_input(self.rootWindow, X.KeyPress, keyCode)
    def fake_keyup(self, keyName):
        """Queue an XTEST key release (pairs with fake_keydown)."""
        self.__enqueue(self.__fakeKeyup, keyName)
    def __fakeKeyup(self, keyName):
        keyCode = self.__lookupKeyCode(keyName)
        xtest.fake_input(self.rootWindow, X.KeyRelease, keyCode)
    def send_modified_key(self, keyName, modifiers):
        """
        Send a modified key (e.g. when emulating a hotkey)
        """
        self.__enqueue(self.__sendModifiedKey, keyName, modifiers)
    def __sendModifiedKey(self, keyName, modifiers):
        """Press the modifier keys, send the key with the combined modifier
        mask, then release the modifiers in order."""
        logger.debug("Send modified key: modifiers: %s key: %s", modifiers, keyName)
        try:
            mask = 0
            for mod in modifiers:
                mask |= self.modMasks[mod]
            keyCode = self.__lookupKeyCode(keyName)
            for mod in modifiers: self.__pressKey(mod)
            self.__sendKeyCode(keyCode, mask)
            for mod in modifiers: self.__releaseKey(mod)
        except Exception, e:
            logger.warn("Error sending modified key %r %r: %s", modifiers, keyName, str(e))
    def send_mouse_click(self, xCoord, yCoord, button, relative):
        """Queue a mouse click at the given coordinates; relative=True means
        coordinates are relative to the focused window, else to the root."""
        self.__enqueue(self.__sendMouseClick, xCoord, yCoord, button, relative)
    def __sendMouseClick(self, xCoord, yCoord, button, relative):
        # Get current pointer position so we can return it there
        pos = self.rootWindow.query_pointer()
        if relative:
            focus = self.localDisplay.get_input_focus().focus
            focus.warp_pointer(xCoord, yCoord)
            xtest.fake_input(focus, X.ButtonPress, button, x=xCoord, y=yCoord)
            xtest.fake_input(focus, X.ButtonRelease, button, x=xCoord, y=yCoord)
        else:
            self.rootWindow.warp_pointer(xCoord, yCoord)
            xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
            xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
        # Restore the original pointer position.
        self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
        self.__flush()
    def send_mouse_click_relative(self, xoff, yoff, button):
        """Queue a click at an offset from the current pointer position."""
        self.__enqueue(self.__sendMouseClickRelative, xoff, yoff, button)
    def __sendMouseClickRelative(self, xoff, yoff, button):
        # Get current pointer position
        pos = self.rootWindow.query_pointer()
        xCoord = pos.root_x + xoff
        yCoord = pos.root_y + yoff
        self.rootWindow.warp_pointer(xCoord, yCoord)
        xtest.fake_input(self.rootWindow, X.ButtonPress, button, x=xCoord, y=yCoord)
        xtest.fake_input(self.rootWindow, X.ButtonRelease, button, x=xCoord, y=yCoord)
        # Restore the original pointer position.
        self.rootWindow.warp_pointer(pos.root_x, pos.root_y)
        self.__flush()
    def flush(self):
        """Queue a flush of the X output buffer."""
        self.__enqueue(self.__flush)
    def __flush(self):
        self.localDisplay.flush()
        # Reset the QT4-workaround history (see __doQT4Workaround).
        self.lastChars = []
    def press_key(self, keyName):
        """Queue a key press (no release) for the named key."""
        self.__enqueue(self.__pressKey, keyName)
    def __pressKey(self, keyName):
        self.__sendKeyPressEvent(self.__lookupKeyCode(keyName), 0)
    def release_key(self, keyName):
        """Queue a key release for the named key."""
        self.__enqueue(self.__releaseKey, keyName)
    def __releaseKey(self, keyName):
        self.__sendKeyReleaseEvent(self.__lookupKeyCode(keyName), 0)
    def __flushEvents(self):
        """Event-thread loop: watch the X connection for window create/destroy
        notifications and queue hotkey grabs for each new surviving window.
        Exits when self.shutdown is set."""
        while True:
            try:
                # Wait up to 1s for readability on the X connection.
                readable, w, e = select.select([self.localDisplay], [], [], 1)
                # NOTE(review): the extra sleep makes each cycle take >=1s
                # even when events are pending -- presumably to batch
                # create/destroy pairs; confirm intent.
                time.sleep(1)
                if self.localDisplay in readable:
                    createdWindows = []
                    destroyedWindows = []
                    for x in xrange(self.localDisplay.pending_events()):
                        event = self.localDisplay.next_event()
                        if event.type == X.CreateNotify:
                            createdWindows.append(event.window)
                        if event.type == X.DestroyNotify:
                            destroyedWindows.append(event.window)
                    # Only grab in windows still alive at the end of the batch.
                    for window in createdWindows:
                        if window not in destroyedWindows:
                            self.__enqueue(self.__grabHotkeysForWindow, window)
                if self.shutdown:
                    break
            except:
                # Best-effort loop: keep running even if the display errors.
                pass
    def handle_keypress(self, keyCode):
        """Queue processing of a raw key-press event."""
        self.__enqueue(self.__handleKeyPress, keyCode)
    def __handleKeyPress(self, keyCode):
        focus = self.localDisplay.get_input_focus().focus
        modifier = self.__decodeModifier(keyCode)
        if modifier is not None:
            # Modifier keys are tracked separately from ordinary keypresses.
            self.mediator.handle_modifier_down(modifier)
        else:
            self.mediator.handle_keypress(keyCode, self.get_window_title(focus), self.get_window_class(focus))
    def handle_keyrelease(self, keyCode):
        """Queue processing of a raw key-release event."""
        self.__enqueue(self.__handleKeyrelease, keyCode)
    def __handleKeyrelease(self, keyCode):
        # Only modifier releases are of interest to the mediator.
        modifier = self.__decodeModifier(keyCode)
        if modifier is not None:
            self.mediator.handle_modifier_up(modifier)
    def handle_mouseclick(self, button, x, y):
        """Queue processing of a mouse-button press event."""
        self.__enqueue(self.__handleMouseclick, button, x, y)
    def __handleMouseclick(self, button, x, y):
        """Forward a mouse click to the mediator with root- and
        window-relative coordinates plus the active window's title/class."""
        title = self.get_window_title()
        klass = self.get_window_class()
        info = (title, klass)
        if x is None and y is None:
            # No coordinates supplied: query the pointer directly.
            ret = self.localDisplay.get_input_focus().focus.query_pointer()
            self.mediator.handle_mouse_click(ret.root_x, ret.root_y, ret.win_x, ret.win_y, button, info)
        else:
            focus = self.localDisplay.get_input_focus().focus
            try:
                rel = focus.translate_coords(self.rootWindow, x, y)
                self.mediator.handle_mouse_click(x, y, rel.x, rel.y, button, info)
            except:
                # Coordinate translation can fail (e.g. window gone);
                # report (0, 0) as the window-relative position.
                self.mediator.handle_mouse_click(x, y, 0, 0, button, info)
def __decodeModifier(self, keyCode):
"""
Checks if the given keyCode is a modifier key. If it is, returns the modifier name
constant as defined in the iomediator module. If not, returns C{None}
"""
keyName = self.lookup_string(keyCode, False, False, False)
if keyName in MODIFIERS:
return keyName
return None
    def __sendKeyCode(self, keyCode, modifiers=0, theWindow=None):
        """Send a press+release event pair for keyCode, optionally applying
        the QT4 repeated-character workaround first."""
        if ConfigManager.SETTINGS[ENABLE_QT4_WORKAROUND] or self.__enableQT4Workaround:
            self.__doQT4Workaround(keyCode)
        self.__sendKeyPressEvent(keyCode, modifiers, theWindow)
        self.__sendKeyReleaseEvent(keyCode, modifiers, theWindow)
    def __checkWorkaroundNeeded(self):
        # Enable the QT4 workaround when the focused window matches the
        # user-configured list of affected applications.
        focus = self.localDisplay.get_input_focus().focus
        windowName = self.get_window_title(focus)
        windowClass = self.get_window_class(focus)
        w = self.app.configManager.workAroundApps
        if w.match(windowName) or w.match(windowClass):
            self.__enableQT4Workaround = True
        else:
            self.__enableQT4Workaround = False
    def __doQT4Workaround(self, keyCode):
        # Some QT4 apps drop rapidly repeated identical keycodes; flush and
        # pause briefly when the code was seen in the last 10 sends.
        if len(self.lastChars) > 0:
            if keyCode in self.lastChars:
                self.localDisplay.flush()
                time.sleep(0.0125)
        self.lastChars.append(keyCode)
        # Keep only the 10 most recent keycodes.
        if len(self.lastChars) > 10:
            self.lastChars.pop(0)
    def __sendKeyPressEvent(self, keyCode, modifiers, theWindow=None):
        """Deliver a synthetic KeyPress event to theWindow (or the focused
        window when None) with the given modifier state mask."""
        if theWindow is None:
            focus = self.localDisplay.get_input_focus().focus
        else:
            focus = theWindow
        keyEvent = event.KeyPress(
                                  detail=keyCode,
                                  time=X.CurrentTime,
                                  root=self.rootWindow,
                                  window=focus,
                                  child=X.NONE,
                                  root_x=1,
                                  root_y=1,
                                  event_x=1,
                                  event_y=1,
                                  state=modifiers,
                                  same_screen=1
                                  )
        focus.send_event(keyEvent)
    def __sendKeyReleaseEvent(self, keyCode, modifiers, theWindow=None):
        """Deliver the matching synthetic KeyRelease event (mirror of
        __sendKeyPressEvent)."""
        if theWindow is None:
            focus = self.localDisplay.get_input_focus().focus
        else:
            focus = theWindow
        keyEvent = event.KeyRelease(
                                  detail=keyCode,
                                  time=X.CurrentTime,
                                  root=self.rootWindow,
                                  window=focus,
                                  child=X.NONE,
                                  root_x=1,
                                  root_y=1,
                                  event_x=1,
                                  event_y=1,
                                  state=modifiers,
                                  same_screen=1
                                  )
        focus.send_event(keyEvent)
    def __lookupKeyCode(self, char):
        """Resolve an AutoKey key name or single character to an X keycode.
        Accepts the "<codeN>" form produced by lookup_string for unknown
        keys."""
        if char in AK_TO_XK_MAP:
            return self.localDisplay.keysym_to_keycode(AK_TO_XK_MAP[char])
        elif char.startswith("<code"):
            # Strip "<code" prefix and ">" suffix to recover the raw code.
            return int(char[5:-1])
        else:
            try:
                return self.localDisplay.keysym_to_keycode(ord(char))
            except Exception, e:
                logger.error("Unknown key name: %s", char)
                raise
    def get_window_title(self, window=None, traverse=True):
        """Return the title of the given window (focused window when None);
        empty string on any error. traverse=True walks up to parent windows
        when the title property is missing."""
        try:
            if window is None:
                windowvar = self.localDisplay.get_input_focus().focus
            else:
                windowvar = window
            return self.__getWinTitle(windowvar, traverse)
        except:
            return ""
    def __getWinTitle(self, windowvar, traverse):
        # Prefer _NET_WM_VISIBLE_NAME, fall back to the plain name atom.
        atom = windowvar.get_property(self.__VisibleNameAtom, 0, 0, 255)
        if atom is None:
            atom = windowvar.get_property(self.__NameAtom, 0, 0, 255)
        if atom:
            return atom.value.decode("utf-8")
        elif traverse:
            return self.__getWinTitle(windowvar.query_tree().parent, True)
        else:
            return ""
    def get_window_class(self, window=None, traverse=True):
        """Return "res_name.res_class" of the given window (focused window
        when None); empty string on any error."""
        try:
            if window is None:
                windowvar = self.localDisplay.get_input_focus().focus
            else:
                windowvar = window
            return self.__getWinClass(windowvar, traverse)
        except:
            return ""
def __getWinClass(self, windowvar, traverse):
wmclass = windowvar.get_wm_class()
if (wmclass == None or wmclass == ""):
if traverse:
return self.__getWinClass(windowvar.query_tree().parent, True)
else:
return ""
return wmclass[0] + '.' + wmclass[1]
    def cancel(self):
        """Shut down the interface: stop the worker queue, signal the event
        loop, join the helper threads and close the display."""
        # (None, None) is the sentinel that stops the queue consumer.
        self.queue.put_nowait((None, None))
        self.shutdown = True
        self.listenerThread.join()
        self.eventThread.join()
        self.localDisplay.flush()
        self.localDisplay.close()
        self.join()
class XRecordInterface(XInterfaceBase):
    """
    Keyboard/mouse capture backend using the X RECORD extension.

    A second display connection is used for recording, since
    record_enable_context blocks the connection it runs on.
    """
    def initialise(self):
        self.recordDisplay = display.Display()
        self.__locksChecked = False
        # Check for record extension
        if not self.recordDisplay.has_extension("RECORD"):
            raise Exception("Your X-Server does not have the RECORD extension available/enabled.")
    def run(self):
        # Create a recording context; we only want key and mouse events
        self.ctx = self.recordDisplay.record_create_context(
                0,
                [record.AllClients],
                [{
                        'core_requests': (0, 0),
                        'core_replies': (0, 0),
                        'ext_requests': (0, 0, 0, 0),
                        'ext_replies': (0, 0, 0, 0),
                        'delivered_events': (0, 0),
                        'device_events': (X.KeyPress, X.ButtonPress), #X.KeyRelease,
                        'errors': (0, 0),
                        'client_started': False,
                        'client_died': False,
                }])
        # Enable the context; this only returns after a call to record_disable_context,
        # while calling the callback function in the meantime
        logger.info("XRecord interface thread starting")
        self.recordDisplay.record_enable_context(self.ctx, self.__processEvent)
        # Finally free the context
        self.recordDisplay.record_free_context(self.ctx)
        self.recordDisplay.close()
    def cancel(self):
        # Disabling must be done on the other (non-blocked) connection.
        self.localDisplay.record_disable_context(self.ctx)
        XInterfaceBase.cancel(self)
    def __processEvent(self, reply):
        # Record-extension callback: unpack raw event data and dispatch.
        if reply.category != record.FromServer:
            return
        if reply.client_swapped:
            return
        if not len(reply.data) or ord(reply.data[0]) < 2:
            # not an event
            return
        data = reply.data
        while len(data):
            event, data = rq.EventField(None).parse_binary_value(data, self.recordDisplay.display, None, None)
            if event.type == X.KeyPress:
                self.handle_keypress(event.detail)
            elif event.type == X.KeyRelease:
                self.handle_keyrelease(event.detail)
            elif event.type == X.ButtonPress:
                self.handle_mouseclick(event.detail, event.root_x, event.root_y)
class AtSpiInterface(XInterfaceBase):
    """
    Keyboard/mouse capture backend using the AT-SPI accessibility registry.
    """
    def initialise(self):
        self.registry = pyatspi.Registry
    def start(self):
        # NOTE(review): overrides the base start(); events are delivered via
        # the registry's own callbacks rather than a dedicated run() loop --
        # confirm no base-thread behaviour is lost.
        logger.info("AT-SPI interface thread starting")
        self.registry.registerKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
        self.registry.registerEventListener(self.__processMouseEvent, 'mouse:button')
    def cancel(self):
        self.registry.deregisterKeystrokeListener(self.__processKeyEvent, mask=pyatspi.allModifiers())
        self.registry.deregisterEventListener(self.__processMouseEvent, 'mouse:button')
        self.registry.stop()
        XInterfaceBase.cancel(self)
    def __processKeyEvent(self, event):
        if event.type == pyatspi.KEY_PRESSED_EVENT:
            self.handle_keypress(event.hw_code)
        else:
            self.handle_keyrelease(event.hw_code)
    def __processMouseEvent(self, event):
        # Event types look like "mouse:button:1p" / "1r"; 'p' means press.
        if event.type[-1] == 'p':
            button = int(event.type[-2])
            self.handle_mouseclick(button, event.detail1, event.detail2)
    def __pumpEvents(self):
        # Presumably used as a periodic (e.g. GLib idle) callback; returning
        # True keeps such callbacks scheduled.
        pyatspi.Registry.pumpQueuedEvents()
        return True
from iomediator import Key, MODIFIERS
from configmanager import *
XK.load_keysym_group('xkb')  # needed for XK_ISO_Level3_Shift (AltGr)
# Translation table: X keysyms -> AutoKey key-name constants.
XK_TO_AK_MAP = {
           XK.XK_Shift_L : Key.SHIFT,
           XK.XK_Shift_R : Key.SHIFT,
           XK.XK_Caps_Lock : Key.CAPSLOCK,
           XK.XK_Control_L : Key.CONTROL,
           XK.XK_Control_R : Key.CONTROL,
           XK.XK_Alt_L : Key.ALT,
           XK.XK_Alt_R : Key.ALT,
           XK.XK_ISO_Level3_Shift : Key.ALT_GR,
           XK.XK_Super_L : Key.SUPER,
           XK.XK_Super_R : Key.SUPER,
           XK.XK_Hyper_L : Key.HYPER,
           XK.XK_Hyper_R : Key.HYPER,
           XK.XK_Meta_L : Key.META,
           XK.XK_Meta_R : Key.META,
           XK.XK_Num_Lock : Key.NUMLOCK,
           #SPACE : Key.SPACE,
           XK.XK_Tab : Key.TAB,
           XK.XK_Left : Key.LEFT,
           XK.XK_Right : Key.RIGHT,
           XK.XK_Up : Key.UP,
           XK.XK_Down : Key.DOWN,
           XK.XK_Return : Key.ENTER,
           XK.XK_BackSpace : Key.BACKSPACE,
           XK.XK_Scroll_Lock : Key.SCROLL_LOCK,
           XK.XK_Print : Key.PRINT_SCREEN,
           XK.XK_Pause : Key.PAUSE,
           XK.XK_Menu : Key.MENU,
           XK.XK_F1 : Key.F1,
           XK.XK_F2 : Key.F2,
           XK.XK_F3 : Key.F3,
           XK.XK_F4 : Key.F4,
           XK.XK_F5 : Key.F5,
           XK.XK_F6 : Key.F6,
           XK.XK_F7 : Key.F7,
           XK.XK_F8 : Key.F8,
           XK.XK_F9 : Key.F9,
           XK.XK_F10 : Key.F10,
           XK.XK_F11 : Key.F11,
           XK.XK_F12 : Key.F12,
           XK.XK_Escape : Key.ESCAPE,
           XK.XK_Insert : Key.INSERT,
           XK.XK_Delete : Key.DELETE,
           XK.XK_Home : Key.HOME,
           XK.XK_End : Key.END,
           XK.XK_Page_Up : Key.PAGE_UP,
           XK.XK_Page_Down : Key.PAGE_DOWN,
           XK.XK_KP_Insert : Key.NP_INSERT,
           XK.XK_KP_Delete : Key.NP_DELETE,
           XK.XK_KP_End : Key.NP_END,
           XK.XK_KP_Down : Key.NP_DOWN,
           XK.XK_KP_Page_Down : Key.NP_PAGE_DOWN,
           XK.XK_KP_Left : Key.NP_LEFT,
           XK.XK_KP_Begin : Key.NP_5,
           XK.XK_KP_Right : Key.NP_RIGHT,
           XK.XK_KP_Home : Key.NP_HOME,
           XK.XK_KP_Up: Key.NP_UP,
           XK.XK_KP_Page_Up : Key.NP_PAGE_UP,
           XK.XK_KP_Divide : Key.NP_DIVIDE,
           XK.XK_KP_Multiply : Key.NP_MULTIPLY,
           XK.XK_KP_Add : Key.NP_ADD,
           XK.XK_KP_Subtract : Key.NP_SUBTRACT,
           XK.XK_KP_Enter : Key.ENTER,
           XK.XK_space : ' '
           }
# Inverse table: AutoKey key names -> X keysyms.
AK_TO_XK_MAP = dict((v,k) for k, v in XK_TO_AK_MAP.iteritems())
# Numpad keysyms -> the character they produce when NumLock is active.
XK_TO_AK_NUMLOCKED = {
           XK.XK_KP_Insert : "0",
           XK.XK_KP_Delete : ".",
           XK.XK_KP_End : "1",
           XK.XK_KP_Down : "2",
           XK.XK_KP_Page_Down : "3",
           XK.XK_KP_Left : "4",
           XK.XK_KP_Begin : "5",
           XK.XK_KP_Right : "6",
           XK.XK_KP_Home : "7",
           XK.XK_KP_Up: "8",
           XK.XK_KP_Page_Up : "9",
           XK.XK_KP_Divide : "/",
           XK.XK_KP_Multiply : "*",
           XK.XK_KP_Add : "+",
           XK.XK_KP_Subtract : "-",
           XK.XK_KP_Enter : Key.ENTER
           }
class MockMediator:
    """
    Mock IoMediator for testing purposes.

    Signatures mirror the real call sites in XInterfaceBase:
    handle_keypress is invoked with (keyCode, windowName, windowClass) and
    handle_mouse_click with the root/window-relative coordinates, button and
    window info tuple.  The extra parameters default to None so existing
    callers of the old, narrower signatures keep working.
    """
    def handle_modifier_down(self, modifier):
        pass
    def handle_modifier_up(self, modifier):
        pass
    def handle_keypress(self, keyCode, windowName=None, windowClass=None):
        pass
    def handle_mouse_click(self, rootX=None, rootY=None, relX=None, relY=None, button=None, windowInfo=None):
        pass
if __name__ == "__main__":
    # Manual smoke test: start the interface, dump the keymap, run for a
    # while, then shut down cleanly.
    import time
    # NOTE(review): XLibInterface is not defined in this module as shown --
    # presumably a legacy name for one of the interface classes; confirm.
    x = XLibInterface(MockMediator(), True)
    x.start()
    x.keymap_test()
    time.sleep(10.0)
    #time.sleep(4.0)
    #x.send_unicode_key([0, 3, 9, 4])
    x.cancel()
    print "Test completed. Thank you for your assistance in improving AutoKey!"
| gpl-3.0 |
linjoahow/cd0505 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_functiontestcase.py | 791 | 5478 | import unittest
from .support import LoggingResult
class Test_FunctionTestCase(unittest.TestCase):
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
raise RuntimeError('raised by tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), str)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
| agpl-3.0 |
quamilek/django | tests/update_only_fields/tests.py | 296 | 9780 | from __future__ import unicode_literals
from django.db.models.signals import post_save, pre_save
from django.test import TestCase
from .models import Account, Employee, Person, Profile, ProxyEmployee
class UpdateOnlyFieldsTests(TestCase):
    """Tests for ``Model.save(update_fields=...)``: that only the listed
    fields are written, and how the option interacts with deferred fields,
    multi-table inheritance, proxy models, m2m fields and save signals."""
    def test_update_fields_basic(self):
        """Only fields named in update_fields are persisted; other dirty
        fields are left untouched in the database."""
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s.gender = 'M'
        s.name = 'Ian'
        s.save(update_fields=['name'])
        s = Person.objects.get(pk=s.pk)
        self.assertEqual(s.gender, 'F')
        self.assertEqual(s.name, 'Ian')
    def test_update_fields_deferred(self):
        """A plain save() on an instance with deferred fields issues a single
        UPDATE covering the changed fields (loaded or explicitly assigned)."""
        s = Person.objects.create(name='Sara', gender='F', pid=22)
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(1):
            s1.save()
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Emily")
        self.assertEqual(s2.gender, "M")
    def test_update_fields_only_1(self):
        """Same as the deferred case but using only(): changed fields are
        saved in one query."""
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        with self.assertNumQueries(1):
            s1.save()
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Emily")
        self.assertEqual(s2.gender, "M")
    def test_update_fields_only_2(self):
        """An explicit update_fields list overrides deferred-field tracking;
        modifications to unlisted fields are not written."""
        s = Person.objects.create(name='Sara', gender='F', pid=22)
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.name = "Emily"
        s1.gender = "M"
        # Two queries: one to load the deferred 'pid', one UPDATE.
        with self.assertNumQueries(2):
            s1.save(update_fields=['pid'])
        s2 = Person.objects.get(pk=s1.pk)
        self.assertEqual(s2.name, "Sara")
        self.assertEqual(s2.gender, "F")
    def test_update_fields_only_repeated(self):
        """Field-change tracking lives on the instance, not on the deferred
        class, so a freshly loaded instance saves only its own changes."""
        s = Person.objects.create(name='Sara', gender='F')
        self.assertEqual(s.gender, 'F')
        s1 = Person.objects.only('name').get(pk=s.pk)
        s1.gender = 'M'
        with self.assertNumQueries(1):
            s1.save()
        # Test that the deferred class does not remember that gender was
        # set, instead the instance should remember this.
        s1 = Person.objects.only('name').get(pk=s.pk)
        with self.assertNumQueries(1):
            s1.save()
    def test_update_fields_inheritance_defer(self):
        """Deferred loading combined with multi-table inheritance still
        saves the changed field in a single query."""
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F',
            employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('name').get(pk=e1.pk)
        e1.name = 'Linda'
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).name,
            'Linda')
    def test_update_fields_fk_defer(self):
        """Assigning either the FK object or the raw FK id marks the field
        as changed on a deferred instance (one UPDATE each)."""
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = Employee.objects.create(name='Sara', gender='F',
            employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('profile').get(pk=e1.pk)
        e1.profile = profile_receptionist
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
        e1.profile_id = profile_boss.pk
        with self.assertNumQueries(1):
            e1.save()
        self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
    def test_select_related_only_interaction(self):
        """A select_related()+only() instance saves the values it actually
        loaded and does not clobber fields it never fetched."""
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F',
            employee_num=1, profile=profile_boss)
        e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
        profile_boss.name = 'Clerk'
        profile_boss.salary = 1000
        profile_boss.save()
        # The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
        # overwritten.
        with self.assertNumQueries(1):
            e1.profile.save()
        reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
        self.assertEqual(reloaded_profile.name, profile_boss.name)
        self.assertEqual(reloaded_profile.salary, 3000)
    def test_update_fields_m2m(self):
        """Naming a many-to-many field in update_fields raises ValueError."""
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        e1 = Employee.objects.create(name='Sara', gender='F',
            employee_num=1, profile=profile_boss)
        a1 = Account.objects.create(num=1)
        a2 = Account.objects.create(num=2)
        e1.accounts = [a1, a2]
        with self.assertRaises(ValueError):
            e1.save(update_fields=['accounts'])
    def test_update_fields_inheritance(self):
        """update_fields works across multi-table inheritance, accepting the
        field name ('profile') as well as the FK attname ('profile_id')."""
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = Employee.objects.create(name='Sara', gender='F',
            employee_num=1, profile=profile_boss)
        e1.name = 'Ian'
        e1.gender = 'M'
        e1.save(update_fields=['name'])
        e2 = Employee.objects.get(pk=e1.pk)
        self.assertEqual(e2.name, 'Ian')
        self.assertEqual(e2.gender, 'F')
        self.assertEqual(e2.profile, profile_boss)
        e2.profile = profile_receptionist
        e2.name = 'Sara'
        e2.save(update_fields=['profile'])
        e3 = Employee.objects.get(pk=e1.pk)
        self.assertEqual(e3.name, 'Ian')
        self.assertEqual(e3.profile, profile_receptionist)
        with self.assertNumQueries(1):
            e3.profile = profile_boss
            e3.save(update_fields=['profile_id'])
        e4 = Employee.objects.get(pk=e3.pk)
        self.assertEqual(e4.profile, profile_boss)
        self.assertEqual(e4.profile_id, profile_boss.pk)
    def test_update_fields_inheritance_with_proxy_model(self):
        """Same inheritance behaviour when going through a proxy model."""
        profile_boss = Profile.objects.create(name='Boss', salary=3000)
        profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
        e1 = ProxyEmployee.objects.create(name='Sara', gender='F',
            employee_num=1, profile=profile_boss)
        e1.name = 'Ian'
        e1.gender = 'M'
        e1.save(update_fields=['name'])
        e2 = ProxyEmployee.objects.get(pk=e1.pk)
        self.assertEqual(e2.name, 'Ian')
        self.assertEqual(e2.gender, 'F')
        self.assertEqual(e2.profile, profile_boss)
        e2.profile = profile_receptionist
        e2.name = 'Sara'
        e2.save(update_fields=['profile'])
        e3 = ProxyEmployee.objects.get(pk=e1.pk)
        self.assertEqual(e3.name, 'Ian')
        self.assertEqual(e3.profile, profile_receptionist)
    def test_update_fields_signals(self):
        """pre_save/post_save receivers are passed the update_fields value."""
        p = Person.objects.create(name='Sara', gender='F')
        pre_save_data = []
        def pre_save_receiver(**kwargs):
            pre_save_data.append(kwargs['update_fields'])
        pre_save.connect(pre_save_receiver)
        post_save_data = []
        def post_save_receiver(**kwargs):
            post_save_data.append(kwargs['update_fields'])
        post_save.connect(post_save_receiver)
        p.save(update_fields=['name'])
        self.assertEqual(len(pre_save_data), 1)
        self.assertEqual(len(pre_save_data[0]), 1)
        self.assertIn('name', pre_save_data[0])
        self.assertEqual(len(post_save_data), 1)
        self.assertEqual(len(post_save_data[0]), 1)
        self.assertIn('name', post_save_data[0])
        pre_save.disconnect(pre_save_receiver)
        post_save.disconnect(post_save_receiver)
    def test_update_fields_incorrect_params(self):
        """Bad update_fields values (unknown names, a bare string) raise
        ValueError."""
        s = Person.objects.create(name='Sara', gender='F')
        with self.assertRaises(ValueError):
            s.save(update_fields=['first_name'])
        with self.assertRaises(ValueError):
            s.save(update_fields="name")
    def test_empty_update_fields(self):
        """update_fields=[] skips the save entirely, including signals."""
        s = Person.objects.create(name='Sara', gender='F')
        pre_save_data = []
        def pre_save_receiver(**kwargs):
            pre_save_data.append(kwargs['update_fields'])
        pre_save.connect(pre_save_receiver)
        post_save_data = []
        def post_save_receiver(**kwargs):
            post_save_data.append(kwargs['update_fields'])
        post_save.connect(post_save_receiver)
        # Save is skipped.
        with self.assertNumQueries(0):
            s.save(update_fields=[])
        # Signals were skipped, too...
        self.assertEqual(len(pre_save_data), 0)
        self.assertEqual(len(post_save_data), 0)
        pre_save.disconnect(pre_save_receiver)
        post_save.disconnect(post_save_receiver)
    def test_num_queries_inheritance(self):
        """With inheritance, saving fields that all live in one table issues
        exactly one UPDATE; fields from both tables issue two."""
        s = Employee.objects.create(name='Sara', gender='F')
        s.employee_num = 1
        s.name = 'Emily'
        with self.assertNumQueries(1):
            s.save(update_fields=['employee_num'])
        s = Employee.objects.get(pk=s.pk)
        self.assertEqual(s.employee_num, 1)
        self.assertEqual(s.name, 'Sara')
        s.employee_num = 2
        s.name = 'Emily'
        with self.assertNumQueries(1):
            s.save(update_fields=['name'])
        s = Employee.objects.get(pk=s.pk)
        self.assertEqual(s.name, 'Emily')
        self.assertEqual(s.employee_num, 1)
        # A little sanity check that we actually did updates...
        self.assertEqual(Employee.objects.count(), 1)
        self.assertEqual(Person.objects.count(), 1)
        with self.assertNumQueries(2):
            s.save(update_fields=['name', 'employee_num'])
| bsd-3-clause |
efiop/dvc | dvc/repo/experiments/base.py | 1 | 4573 | from typing import Optional
from dvc.exceptions import DvcException, InvalidArgumentError
# Experiment refs are stored according baseline git SHA:
# refs/exps/01/234abcd.../<exp_name>
# Root git ref namespace under which all experiment refs live.
EXPS_NAMESPACE = "refs/exps"
# Stash ref for experiments (presumably used to queue runs — confirm in the
# executor/stash code, which is not visible in this module).
EXPS_STASH = f"{EXPS_NAMESPACE}/stash"
# Sub-namespace for refs owned by an experiment executor.
EXEC_NAMESPACE = f"{EXPS_NAMESPACE}/exec"
EXEC_APPLY = f"{EXEC_NAMESPACE}/EXEC_APPLY"
EXEC_CHECKPOINT = f"{EXEC_NAMESPACE}/EXEC_CHECKPOINT"
EXEC_BRANCH = f"{EXEC_NAMESPACE}/EXEC_BRANCH"
EXEC_BASELINE = f"{EXEC_NAMESPACE}/EXEC_BASELINE"
EXEC_HEAD = f"{EXEC_NAMESPACE}/EXEC_HEAD"
EXEC_MERGE = f"{EXEC_NAMESPACE}/EXEC_MERGE"
class UnchangedExperimentError(DvcException):
    """Raised when an experiment is identical to its source revision."""

    def __init__(self, rev):
        self.rev = rev
        super().__init__(f"Experiment unchanged from '{rev[:7]}'.")
class BaselineMismatchError(DvcException):
    """Raised when an experiment's parent differs from the expected baseline."""

    def __init__(self, rev, expected):
        rev = rev.hexsha if hasattr(rev, "hexsha") else rev
        rev_str = "invalid commit" if rev is None else f"{rev[:7]}"
        super().__init__(
            f"Experiment derived from '{rev_str}', expected '{expected[:7]}'."
        )
        self.rev = rev
        self.expected_rev = expected
class ExperimentExistsError(DvcException):
    """Raised when reproducing would overwrite an existing experiment."""

    def __init__(self, name: str):
        self.name = name
        super().__init__(
            "Reproduced experiment conflicts with existing experiment "
            f"'{name}'. To overwrite the existing experiment run:\n\n"
            "\tdvc exp run -f ...\n\n"
            "To run this experiment with a different name run:\n\n"
            f"\tdvc exp run -n <new_name> ...\n"
        )
class CheckpointExistsError(DvcException):
    """Raised when reproducing would conflict with an existing checkpoint
    experiment."""

    def __init__(self, name: str):
        self.name = name
        super().__init__(
            "Reproduced checkpoint experiment conflicts with existing "
            f"experiment '{name}'. To restart (and overwrite) the existing "
            "experiment run:\n\n"
            "\tdvc exp run -f ...\n\n"
            "To resume the existing experiment, run:\n\n"
            f"\tdvc exp resume {name}\n"
        )
class InvalidExpRefError(DvcException):
    """Raised for a ref name that does not follow the experiment ref layout."""

    def __init__(self, ref):
        self.ref = ref
        super().__init__(f"'{ref}' is not a valid experiment refname.")
class InvalidExpRevError(InvalidArgumentError):
    """Raised when a revision does not point at an experiment commit."""

    def __init__(self, rev):
        msg = f"'{rev}' does not appear to be an experiment commit."
        super().__init__(msg)
class MultipleBranchError(DvcException):
    """Raised when a commit belongs to more than one experiment branch."""

    def __init__(self, rev, ref_infos):
        self.rev = rev
        self.ref_infos = ref_infos
        super().__init__(
            f"Ambiguous commit '{rev[:7]}' belongs to multiple experiment "
            "branches."
        )
class ApplyConflictError(InvalidArgumentError):
    """Raised when applying an experiment would overwrite uncommitted
    workspace changes."""

    def __init__(self, name):
        # Fixed ungrammatical user-facing message: "cannot be applied to
        # because" -> "cannot be applied because".
        super().__init__(
            f"Experiment '{name}' cannot be applied because your current "
            "workspace contains changes which would be overwritten. Either "
            "'git stash' your current changes before applying this "
            "experiment, or re-run this command with '--force' to overwrite "
            "your current changes."
        )
        self.name = name
class ExpRefInfo:
    """Parsed form of an experiment ref:
    ``refs/exps/<baseline_sha[:2]>/<baseline_sha[2:]>/<name>`` (the baseline
    and name components are optional)."""

    namespace = EXPS_NAMESPACE

    def __init__(
        self, baseline_sha: Optional[str] = None, name: Optional[str] = None
    ):
        self.baseline_sha = baseline_sha
        self.name: str = name if name else ""

    def __str__(self) -> str:
        return "/".join(self.parts)

    def __repr__(self) -> str:
        baseline = f"'{self.baseline_sha}'" if self.baseline_sha else "None"
        name = f"'{self.name}'" if self.name else "None"
        return f"ExpRefInfo(baseline_sha={baseline}, name={name})"

    @property
    def parts(self):
        """Tuple of path components that make up this ref name."""
        return (
            (self.namespace,)
            + (
                (self.baseline_sha[:2], self.baseline_sha[2:])
                if self.baseline_sha
                else ()
            )
            + ((self.name,) if self.name else ())
        )

    @classmethod
    def from_ref(cls, ref: str):
        """Parse ``ref`` into an ``ExpRefInfo``.

        Raises:
            InvalidExpRefError: if ``ref`` is not a valid experiment refname.
        """
        try:
            parts = ref.split("/")
            if (
                len(parts) < 2
                or len(parts) == 3
                or len(parts) > 5
                or "/".join(parts[:2]) != EXPS_NAMESPACE
            ):
                # BUG FIX: the exception was previously constructed but never
                # raised, so invalid refs silently fell through to the
                # parsing below.
                raise InvalidExpRefError(ref)
        except ValueError:
            raise InvalidExpRefError(ref)
        baseline_sha = parts[2] + parts[3] if len(parts) >= 4 else None
        name = parts[4] if len(parts) == 5 else None
        return cls(baseline_sha, name)
| apache-2.0 |
becm/meson | mesonbuild/modules/pkgconfig.py | 1 | 25361 | # Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, types
from pathlib import PurePath
from .. import build
from .. import dependencies
from ..dependencies.misc import ThreadDependency
from .. import mesonlib
from .. import mlog
from . import ModuleReturnValue
from . import ExtensionModule
from ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs
already_warned_objs = set()
class DependenciesHelper:
    """Accumulates the libraries, requirements and compile flags that end up
    in the Libs/Libs.private/Requires/Requires.private/Cflags fields of a
    generated pkg-config file."""

    def __init__(self, state, name):
        self.state = state
        self.name = name
        self.pub_libs = []
        self.pub_reqs = []
        self.priv_libs = []
        self.priv_reqs = []
        self.cflags = []
        # Maps package name -> set of version constraint strings.
        self.version_reqs = {}

    def add_pub_libs(self, libs):
        """Add public libraries (ends up in 'Libs:')."""
        libs, reqs, cflags = self._process_libs(libs, True)
        self.pub_libs = libs + self.pub_libs # prepend to preserve dependencies
        self.pub_reqs += reqs
        self.cflags += cflags

    def add_priv_libs(self, libs):
        """Add private libraries (ends up in 'Libs.private:')."""
        libs, reqs, _ = self._process_libs(libs, False)
        self.priv_libs = libs + self.priv_libs
        self.priv_reqs += reqs

    def add_pub_reqs(self, reqs):
        """Add public requirements (ends up in 'Requires:')."""
        self.pub_reqs += self._process_reqs(reqs)

    def add_priv_reqs(self, reqs):
        """Add private requirements (ends up in 'Requires.private:')."""
        self.priv_reqs += self._process_reqs(reqs)

    def _check_generated_pc_deprecation(self, obj):
        """Warn, once per (name, object) pair, when a library that was passed
        to a previous generate() call's 'libraries' kwarg is reused as a
        dependency (deprecated behaviour)."""
        if not hasattr(obj, 'generated_pc_warn'):
            return
        name = obj.generated_pc_warn[0]
        if (name, obj.name) in already_warned_objs:
            return
        mlog.deprecation('Library', mlog.bold(obj.name), 'was passed to the '
                         '"libraries" keyword argument of a previous call '
                         'to generate() method instead of first positional '
                         'argument.', 'Adding', mlog.bold(obj.generated_pc),
                         'to "Requires" field, but this is a deprecated '
                         'behaviour that will change in a future version '
                         'of Meson. Please report the issue if this '
                         'warning cannot be avoided in your case.',
                         location=obj.generated_pc_warn[1])
        already_warned_objs.add((name, obj.name))

    def _process_reqs(self, reqs):
        '''Returns string names of requirements'''
        processed_reqs = []
        for obj in mesonlib.unholder(mesonlib.listify(reqs)):
            if not isinstance(obj, str):
                FeatureNew.single_use('pkgconfig.generate requirement from non-string object', '0.46.0', self.state.subproject)
            if hasattr(obj, 'generated_pc'):
                self._check_generated_pc_deprecation(obj)
                processed_reqs.append(obj.generated_pc)
            elif hasattr(obj, 'pcdep'):
                pcdeps = mesonlib.listify(obj.pcdep)
                for d in pcdeps:
                    processed_reqs.append(d.name)
                    self.add_version_reqs(d.name, obj.version_reqs)
            elif isinstance(obj, dependencies.PkgConfigDependency):
                if obj.found():
                    processed_reqs.append(obj.name)
                    self.add_version_reqs(obj.name, obj.version_reqs)
            elif isinstance(obj, str):
                name, version_req = self.split_version_req(obj)
                processed_reqs.append(name)
                self.add_version_reqs(name, version_req)
            elif isinstance(obj, dependencies.Dependency) and not obj.found():
                pass
            elif isinstance(obj, ThreadDependency):
                pass
            else:
                raise mesonlib.MesonException('requires argument not a string, '
                                              'library with pkgconfig-generated file '
                                              'or pkgconfig-dependency object, '
                                              'got {!r}'.format(obj))
        return processed_reqs

    def add_cflags(self, cflags):
        """Add extra compile flags (ends up in 'Cflags:')."""
        self.cflags += mesonlib.stringlistify(cflags)

    def _process_libs(self, libs, public):
        """Classify *libs* into link arguments, pkg-config requirements and
        cflags. *public* selects whether transitive deps of static libraries
        are recorded as public or private."""
        libs = mesonlib.unholder(mesonlib.listify(libs))
        processed_libs = []
        processed_reqs = []
        processed_cflags = []
        for obj in libs:
            shared_library_only = getattr(obj, 'shared_library_only', False)
            if hasattr(obj, 'pcdep'):
                pcdeps = mesonlib.listify(obj.pcdep)
                for d in pcdeps:
                    processed_reqs.append(d.name)
                    self.add_version_reqs(d.name, obj.version_reqs)
            elif hasattr(obj, 'generated_pc'):
                self._check_generated_pc_deprecation(obj)
                processed_reqs.append(obj.generated_pc)
            elif isinstance(obj, dependencies.PkgConfigDependency):
                if obj.found():
                    processed_reqs.append(obj.name)
                    self.add_version_reqs(obj.name, obj.version_reqs)
            elif isinstance(obj, dependencies.InternalDependency):
                if obj.found():
                    processed_libs += obj.get_link_args()
                    processed_cflags += obj.get_compile_args()
                    if public:
                        self.add_pub_libs(obj.libraries)
                    else:
                        self.add_priv_libs(obj.libraries)
            elif isinstance(obj, dependencies.Dependency):
                if obj.found():
                    processed_libs += obj.get_link_args()
                    processed_cflags += obj.get_compile_args()
            elif isinstance(obj, build.SharedLibrary) and shared_library_only:
                # Do not pull dependencies for shared libraries because they are
                # only required for static linking. Adding private requires has
                # the side effect of exposing their cflags, which is the
                # intended behaviour of pkg-config but force Debian to add more
                # than needed build deps.
                # See https://bugs.freedesktop.org/show_bug.cgi?id=105572
                processed_libs.append(obj)
            elif isinstance(obj, (build.SharedLibrary, build.StaticLibrary)):
                processed_libs.append(obj)
                if isinstance(obj, build.StaticLibrary) and public:
                    self.add_pub_libs(obj.get_dependencies(for_pkgconfig=True))
                    self.add_pub_libs(obj.get_external_deps())
                else:
                    self.add_priv_libs(obj.get_dependencies(for_pkgconfig=True))
                    self.add_priv_libs(obj.get_external_deps())
            elif isinstance(obj, str):
                processed_libs.append(obj)
            else:
                raise mesonlib.MesonException('library argument not a string, library or dependency object.')
        return processed_libs, processed_reqs, processed_cflags

    def add_version_reqs(self, name, version_reqs):
        """Record version constraint(s) for the package *name*."""
        if version_reqs:
            if name not in self.version_reqs:
                self.version_reqs[name] = set()
            # Note that pkg-config is picky about whitespace.
            # 'foo > 1.2' is ok but 'foo>1.2' is not.
            # 'foo, bar' is ok, but 'foo,bar' is not.
            # stringlistify() already returns a list, so the previous copy
            # comprehension was redundant and has been removed.
            self.version_reqs[name].update(mesonlib.stringlistify(version_reqs))

    def split_version_req(self, s):
        """Split 'foo >= 1.2' style strings into (name, constraint);
        constraint is None when no operator is present."""
        for op in ['>=', '<=', '!=', '==', '=', '>', '<']:
            pos = s.find(op)
            if pos > 0:
                return s[0:pos].strip(), s[pos:].strip()
        return s, None

    def format_vreq(self, vreq):
        # vreq are '>=1.0' and pkgconfig wants '>= 1.0'
        for op in ['>=', '<=', '!=', '==', '=', '>', '<']:
            if vreq.startswith(op):
                return op + ' ' + vreq[len(op):]
        return vreq

    def format_reqs(self, reqs):
        """Render a requirements list, with any recorded version
        constraints, as a pkg-config 'Requires:' value."""
        result = []
        for name in reqs:
            vreqs = self.version_reqs.get(name, None)
            if vreqs:
                result += [name + ' ' + self.format_vreq(vreq) for vreq in vreqs]
            else:
                result += [name]
        return ', '.join(result)

    def remove_dups(self):
        """De-duplicate all accumulated lists and drop private entries that
        are already public."""
        def _fn(xs, libs=False):
            # Remove duplicates whilst preserving original order
            result = []
            for x in xs:
                # Don't de-dup unknown strings to avoid messing up arguments like:
                # ['-framework', 'CoreAudio', '-framework', 'CoreMedia']
                known_flags = ['-pthread']
                cannot_dedup = libs and isinstance(x, str) and \
                    not x.startswith(('-l', '-L')) and \
                    x not in known_flags
                if x not in result or cannot_dedup:
                    result.append(x)
            return result
        self.pub_libs = _fn(self.pub_libs, True)
        self.pub_reqs = _fn(self.pub_reqs)
        self.priv_libs = _fn(self.priv_libs, True)
        self.priv_reqs = _fn(self.priv_reqs)
        self.cflags = _fn(self.cflags)
        # Remove from private libs/reqs if they are in public already
        self.priv_libs = [i for i in self.priv_libs if i not in self.pub_libs]
        self.priv_reqs = [i for i in self.priv_reqs if i not in self.pub_reqs]
class PkgConfigModule(ExtensionModule):
    """Implements the `pkgconfig` Meson module: generate() writes both
    installed and uninstalled pkg-config (.pc) files."""

    def _get_lname(self, l, msg, pcfile):
        """Return the name usable in a '-l' flag for library *l*, emitting
        *msg* as a warning when the flag may not resolve to the built file."""
        # Nothing special
        if not l.name_prefix_set:
            return l.name
        # Sometimes people want the library to start with 'lib' everywhere,
        # which is achieved by setting name_prefix to '' and the target name to
        # 'libfoo'. In that case, try to get the pkg-config '-lfoo' arg correct.
        if l.prefix == '' and l.name.startswith('lib'):
            return l.name[3:]
        # If the library is imported via an import library which is always
        # named after the target name, '-lfoo' is correct.
        if isinstance(l, build.SharedLibrary) and l.import_filename:
            return l.name
        # In other cases, we can't guarantee that the compiler will be able to
        # find the library via '-lfoo', so tell the user that.
        mlog.warning(msg.format(l.name, 'name_prefix', l.name, pcfile))
        return l.name

    def _escape(self, value):
        '''
        We cannot use quote_arg because it quotes with ' and " which does not
        work with pkg-config and pkgconf at all.
        '''
        # We should always write out paths with / because pkg-config requires
        # spaces to be quoted with \ and that messes up on Windows:
        # https://bugs.freedesktop.org/show_bug.cgi?id=103203
        if isinstance(value, PurePath):
            value = value.as_posix()
        return value.replace(' ', r'\ ')

    def _make_relative(self, prefix, subdir):
        """Strip *prefix* from *subdir* when subdir lives under prefix;
        otherwise return subdir unchanged."""
        if isinstance(prefix, PurePath):
            prefix = prefix.as_posix()
        if isinstance(subdir, PurePath):
            subdir = subdir.as_posix()
        try:
            # commonpath raises ValueError on mixed absolute/relative paths.
            if os.path.commonpath([prefix, subdir]) == prefix:
                skip = len(prefix) + 1
                subdir = subdir[skip:]
        except ValueError:
            pass
        return subdir

    def generate_pkgconfig_file(self, state, deps, subdirs, name, description,
                                url, version, pcfile, conflicts, variables,
                                uninstalled=False, dataonly=False):
        """Write a single .pc file named *pcfile*. With uninstalled=True the
        file points into the build/source trees instead of the prefix; with
        dataonly=True the prefix/libdir/Cflags machinery is omitted."""
        deps.remove_dups()
        coredata = state.environment.get_coredata()
        if uninstalled:
            outdir = os.path.join(state.environment.build_dir, 'meson-uninstalled')
            if not os.path.exists(outdir):
                os.mkdir(outdir)
            prefix = PurePath(state.environment.get_build_dir())
            srcdir = PurePath(state.environment.get_source_dir())
        else:
            outdir = state.environment.scratch_dir
            prefix = PurePath(coredata.get_builtin_option('prefix'))
        # These always return paths relative to prefix
        libdir = PurePath(coredata.get_builtin_option('libdir'))
        incdir = PurePath(coredata.get_builtin_option('includedir'))
        fname = os.path.join(outdir, pcfile)
        with open(fname, 'w', encoding='utf-8') as ofile:
            if not dataonly:
                ofile.write('prefix={}\n'.format(self._escape(prefix)))
                if uninstalled:
                    ofile.write('srcdir={}\n'.format(self._escape(srcdir)))
                else:
                    ofile.write('libdir={}\n'.format(self._escape('${prefix}' / libdir)))
                    ofile.write('includedir={}\n'.format(self._escape('${prefix}' / incdir)))
            if variables:
                ofile.write('\n')
            for k, v in variables:
                ofile.write('{}={}\n'.format(k, self._escape(v)))
            ofile.write('\n')
            ofile.write('Name: %s\n' % name)
            if len(description) > 0:
                ofile.write('Description: %s\n' % description)
            if len(url) > 0:
                ofile.write('URL: %s\n' % url)
            ofile.write('Version: %s\n' % version)
            reqs_str = deps.format_reqs(deps.pub_reqs)
            if len(reqs_str) > 0:
                ofile.write('Requires: {}\n'.format(reqs_str))
            reqs_str = deps.format_reqs(deps.priv_reqs)
            if len(reqs_str) > 0:
                ofile.write('Requires.private: {}\n'.format(reqs_str))
            if len(conflicts) > 0:
                ofile.write('Conflicts: {}\n'.format(' '.join(conflicts)))

            def generate_libs_flags(libs):
                """Yield -L/-l (or C# -r) flags for each library/flag in
                *libs*, de-duplicating -L entries."""
                msg = 'Library target {0!r} has {1!r} set. Compilers ' \
                      'may not find it from its \'-l{2}\' linker flag in the ' \
                      '{3!r} pkg-config file.'
                Lflags = []
                for l in libs:
                    if isinstance(l, str):
                        yield l
                    else:
                        if uninstalled:
                            install_dir = os.path.dirname(state.backend.get_target_filename_abs(l))
                        else:
                            install_dir = l.get_custom_install_dir()[0]
                        if install_dir is False:
                            continue
                        if 'cs' in l.compilers:
                            if isinstance(install_dir, str):
                                Lflag = '-r${prefix}/%s/%s' % (self._escape(self._make_relative(prefix, install_dir)), l.filename)
                            else: # install_dir is True
                                Lflag = '-r${libdir}/%s' % l.filename
                        else:
                            if isinstance(install_dir, str):
                                Lflag = '-L${prefix}/%s' % self._escape(self._make_relative(prefix, install_dir))
                            else: # install_dir is True
                                Lflag = '-L${libdir}'
                        if Lflag not in Lflags:
                            Lflags.append(Lflag)
                            yield Lflag
                        lname = self._get_lname(l, msg, pcfile)
                        # If using a custom suffix, the compiler may not be able to
                        # find the library
                        if l.name_suffix_set:
                            mlog.warning(msg.format(l.name, 'name_suffix', lname, pcfile))
                        if 'cs' not in l.compilers:
                            yield '-l%s' % lname

            def get_uninstalled_include_dirs(libs):
                """Collect build- and source-tree include dirs of *libs*."""
                result = []
                for l in libs:
                    if isinstance(l, str):
                        continue
                    if l.get_subdir() not in result:
                        result.append(l.get_subdir())
                    for i in l.get_include_dirs():
                        curdir = i.get_curdir()
                        for d in i.get_incdirs():
                            path = os.path.join(curdir, d)
                            if path not in result:
                                result.append(path)
                return result

            def generate_uninstalled_cflags(libs):
                """Yield -I flags pointing into both build and source trees."""
                for d in get_uninstalled_include_dirs(libs):
                    for basedir in ['${prefix}', '${srcdir}']:
                        path = os.path.join(basedir, d)
                        yield '-I%s' % self._escape(path)

            if len(deps.pub_libs) > 0:
                ofile.write('Libs: {}\n'.format(' '.join(generate_libs_flags(deps.pub_libs))))
            if len(deps.priv_libs) > 0:
                ofile.write('Libs.private: {}\n'.format(' '.join(generate_libs_flags(deps.priv_libs))))
            cflags = []
            if uninstalled:
                cflags += generate_uninstalled_cflags(deps.pub_libs + deps.priv_libs)
            else:
                for d in subdirs:
                    if d == '.':
                        cflags.append('-I${includedir}')
                    else:
                        cflags.append(self._escape(PurePath('-I${includedir}') / d))
            cflags += [self._escape(f) for f in deps.cflags]
            if cflags and not dataonly:
                ofile.write('Cflags: {}\n'.format(' '.join(cflags)))

    @FeatureNewKwargs('pkgconfig.generate', '0.54.0', ['uninstalled_variables'])
    @FeatureNewKwargs('pkgconfig.generate', '0.42.0', ['extra_cflags'])
    @FeatureNewKwargs('pkgconfig.generate', '0.41.0', ['variables'])
    @FeatureNewKwargs('pkgconfig.generate', '0.54.0', ['dataonly'])
    @permittedKwargs({'libraries', 'version', 'name', 'description', 'filebase',
                      'subdirs', 'requires', 'requires_private', 'libraries_private',
                      'install_dir', 'extra_cflags', 'variables', 'url', 'd_module_versions',
                      'dataonly', 'conflicts'})
    def generate(self, state, args, kwargs):
        """Entry point for pkgconfig.generate(). Validates arguments,
        collects dependencies and writes the installed and uninstalled
        .pc files."""
        default_version = state.project_version['version']
        default_install_dir = None
        default_description = None
        default_name = None
        mainlib = None
        default_subdirs = ['.']
        if not args and 'version' not in kwargs:
            FeatureNew.single_use('pkgconfig.generate implicit version keyword', '0.46.0', state.subproject)
        elif len(args) == 1:
            FeatureNew.single_use('pkgconfig.generate optional positional argument', '0.46.0', state.subproject)
            mainlib = getattr(args[0], 'held_object', args[0])
            if not isinstance(mainlib, (build.StaticLibrary, build.SharedLibrary)):
                raise mesonlib.MesonException('Pkgconfig_gen first positional argument must be a library object')
            default_name = mainlib.name
            default_description = state.project_name + ': ' + mainlib.name
            install_dir = mainlib.get_custom_install_dir()[0]
            if isinstance(install_dir, str):
                default_install_dir = os.path.join(install_dir, 'pkgconfig')
        elif len(args) > 1:
            raise mesonlib.MesonException('Too many positional arguments passed to Pkgconfig_gen.')
        dataonly = kwargs.get('dataonly', False)
        if dataonly:
            default_subdirs = []
            # BUG FIX: this list previously contained the misspelled
            # 'require_private', which never matched the real kwarg
            # 'requires_private' (cf. permittedKwargs above), so the
            # dataonly guard silently failed to block it.
            blocked_vars = ['libraries', 'libraries_private', 'requires_private', 'extra_cflags', 'subdirs']
            if len(set(kwargs) & set(blocked_vars)) > 0:
                raise mesonlib.MesonException('Cannot combine dataonly with any of {}'.format(blocked_vars))

        subdirs = mesonlib.stringlistify(kwargs.get('subdirs', default_subdirs))
        version = kwargs.get('version', default_version)
        if not isinstance(version, str):
            raise mesonlib.MesonException('Version must be specified.')
        name = kwargs.get('name', default_name)
        if not isinstance(name, str):
            raise mesonlib.MesonException('Name not specified.')
        filebase = kwargs.get('filebase', name)
        if not isinstance(filebase, str):
            raise mesonlib.MesonException('Filebase must be a string.')
        description = kwargs.get('description', default_description)
        if not isinstance(description, str):
            raise mesonlib.MesonException('Description is not a string.')
        url = kwargs.get('url', '')
        if not isinstance(url, str):
            raise mesonlib.MesonException('URL is not a string.')
        conflicts = mesonlib.stringlistify(kwargs.get('conflicts', []))

        # Prepend the main library to public libraries list. This is required
        # so dep.add_pub_libs() can handle dependency ordering correctly and put
        # extra libraries after the main library.
        libraries = mesonlib.extract_as_list(kwargs, 'libraries')
        if mainlib:
            libraries = [mainlib] + libraries

        deps = DependenciesHelper(state, filebase)
        deps.add_pub_libs(libraries)
        deps.add_priv_libs(kwargs.get('libraries_private', []))
        deps.add_pub_reqs(kwargs.get('requires', []))
        deps.add_priv_reqs(kwargs.get('requires_private', []))
        deps.add_cflags(kwargs.get('extra_cflags', []))

        dversions = kwargs.get('d_module_versions', None)
        if dversions:
            compiler = state.environment.coredata.compilers.host.get('d')
            if compiler:
                deps.add_cflags(compiler.get_feature_args({'versions': dversions}, None))

        def parse_variable_list(stringlist):
            """Parse 'name=value' strings, rejecting reserved or malformed
            variable definitions."""
            reserved = ['prefix', 'libdir', 'includedir']
            variables = []
            for var in stringlist:
                # foo=bar=baz is ('foo', 'bar=baz')
                l = var.split('=', 1)
                if len(l) < 2:
                    raise mesonlib.MesonException('Invalid variable "{}". Variables must be in \'name=value\' format'.format(var))
                name, value = l[0].strip(), l[1].strip()
                if not name or not value:
                    raise mesonlib.MesonException('Invalid variable "{}". Variables must be in \'name=value\' format'.format(var))
                # Variable names must not contain whitespaces
                if any(c.isspace() for c in name):
                    raise mesonlib.MesonException('Invalid whitespace in assignment "{}"'.format(var))
                if name in reserved:
                    raise mesonlib.MesonException('Variable "{}" is reserved'.format(name))
                variables.append((name, value))
            return variables

        variables = parse_variable_list(mesonlib.stringlistify(kwargs.get('variables', [])))

        pcfile = filebase + '.pc'
        pkgroot = kwargs.get('install_dir', default_install_dir)
        if pkgroot is None:
            if mesonlib.is_freebsd():
                pkgroot = os.path.join(state.environment.coredata.get_builtin_option('prefix'), 'libdata', 'pkgconfig')
            else:
                pkgroot = os.path.join(state.environment.coredata.get_builtin_option('libdir'), 'pkgconfig')
        if not isinstance(pkgroot, str):
            raise mesonlib.MesonException('Install_dir must be a string.')
        self.generate_pkgconfig_file(state, deps, subdirs, name, description, url,
                                     version, pcfile, conflicts, variables,
                                     False, dataonly)
        res = build.Data(mesonlib.File(True, state.environment.get_scratch_dir(), pcfile), pkgroot)
        variables = parse_variable_list(mesonlib.stringlistify(kwargs.get('uninstalled_variables', [])))
        pcfile = filebase + '-uninstalled.pc'
        self.generate_pkgconfig_file(state, deps, subdirs, name, description, url,
                                     version, pcfile, conflicts, variables,
                                     uninstalled=True, dataonly=dataonly)
        # Associate the main library with this generated pc file. If the library
        # is used in any subsequent call to the generated, it will generate a
        # 'Requires:' or 'Requires.private:'.
        # Backward compatibility: We used to set 'generated_pc' on all public
        # libraries instead of just the main one. Keep doing that but warn if
        # anyone is relying on that deprecated behaviour.
        if mainlib:
            if not hasattr(mainlib, 'generated_pc'):
                mainlib.generated_pc = filebase
            else:
                mlog.warning('Already generated a pkg-config file for', mlog.bold(mainlib.name))
        else:
            for lib in deps.pub_libs:
                if not isinstance(lib, str) and not hasattr(lib, 'generated_pc'):
                    lib.generated_pc = filebase
                    location = state.current_node
                    lib.generated_pc_warn = [name, location]
        return ModuleReturnValue(res, [res])
def initialize(*args, **kwargs):
    """Module entry point: Meson calls this to instantiate the module."""
    return PkgConfigModule(*args, **kwargs)
| apache-2.0 |
manteapi/hokusai | test/gtest-1.7.0/test/gtest_test_utils.py | 1100 | 10812 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
# Environment variable consulted by Google Test for XML report output.
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'

# Module-local copy of the environment, mutated by SetEnvVar() below.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets env_var to value in the module-local environ, or unsets it if
  value is None.  Unsetting a variable that is absent is a no-op."""
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
# Both flags default to the directory containing this script.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}

# Guards _ParseAndStripGTestFlags() against parsing sys.argv twice.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv.  This is idempotent.

  For each flag in _flag_map, the environment variable FLAG_NAME (upper
  case) overrides the default, and a --flag_name=value command-line
  argument overrides the environment variable.  Matched command-line
  arguments are removed from argv in place.
  """
  # Suppresses the lint complaint about a global variable since we need it
  # here to maintain module-wide state.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # The environment variable overrides the default value.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]
    # The command line flag overrides the environment variable.
    i = 1  # Skips the program name.
    while i < len(argv):
      prefix = '--' + flag + '='
      if argv[i].startswith(prefix):
        _flag_map[flag] = argv[i][len(prefix):]
        # The matched argument is consumed (del) and the scan stops, so
        # only the first occurrence of each flag wins.
        del argv[i]
        break
      else:
        # Advance only when the current argument was not consumed.
        i += 1
def GetFlag(flag):
  """Returns the value of the given flag ('source_dir' or 'build_dir')."""
  # In case GetFlag() is called before Main(), we always call
  # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
  # are parsed (the call is idempotent).
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  source_dir = GetFlag('source_dir')
  return os.path.abspath(source_dir)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  build_dir = GetFlag('build_dir')
  return os.path.abspath(build_dir)
# Lazily-created directory for temporary files; cleaned up at exit.
_temp_dir = None


def _RemoveTempDir():
  """Deletes the temporary directory, ignoring errors (atexit hook)."""
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
  """Returns a directory for temporary files, creating it on first use."""
  global _temp_dir
  if _temp_dir is None:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir:       directory where to look for executables, by default
                     the result of GetBuildDir().

  Returns:
    The absolute path of the test binary.
  """
  path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                      executable_name))
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'

  if not os.path.exists(path):
    message = (
        'Unable to find the test binary. Please make sure to provide path\n'
        'to the binary via the --build_dir flag or the BUILD_DIR\n'
        'environment variable.')
    # sys.stderr.write() is valid on both Python 2 and 3; the original used
    # the Python-2-only `print >> sys.stderr` statement, which is a syntax
    # error on any Python 3 interpreter.
    sys.stderr.write(message + '\n')
    sys.exit(1)

  return path
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name != 'nt':
    # On Unix, os.WEXITSTATUS() must be used to extract the exit status
    # from the result of os.system().
    if not os.WIFEXITED(exit_code):
      return -1
    return os.WEXITSTATUS(exit_code)
  # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
  # the argument to exit() directly.
  return exit_code
class Subprocess:
  """Runs a command in a child process and records its outcome.

  Uses the `subprocess` module when available, otherwise falls back to the
  legacy `popen2` module (see _SUBPROCESS_MODULE_AVAILABLE at module scope).
  """

  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Signal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """
    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        # Close the child's stdin so it cannot block waiting for input.
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode: negative value means killed by a signal.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    # Translate the unified return code into the public outcome attributes.
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test: parses gtest flags, then delegates to unittest."""
  # We must call _ParseAndStripGTestFlags() before calling
  # unittest.main().  Otherwise the latter will be confused by the
  # --gtest_* flags.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  if GTEST_OUTPUT_VAR_NAME in os.environ:
    del os.environ[GTEST_OUTPUT_VAR_NAME]
  _test_module.main()
| gpl-2.0 |
Pexego/l10n-spain | l10n_es_aeat_mod130/wizard/export_mod130_to_boe.py | 3 | 7462 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from datetime import datetime
from openerp.tools.translate import _
from openerp.osv import orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class L10nEsAeatMod130ExportToBoe(orm.TransientModel):
    """Exporter of AEAT form 130 (IRPF fractional payments, direct
    estimation) to the fixed-width BOE electronic-filing format.

    Each _get_formatted_* method appends fixed-width fields (via the
    inherited _formatString/_formatNumber/_formatBoolean helpers) and
    returns the resulting record string.
    """
    _inherit = "l10n.es.aeat.report.export_to_boe"
    _name = 'l10n.es.aeat.mod130.export_to_boe'

    def _cleanString(self, string):
        # Strip separators so phone numbers/IDs fit fixed-width fields.
        return string.replace("-", "").replace(" ", "").replace("/", "")

    def _get_formatted_declaration_record(self, cr, uid, report, context=None):
        """Build the header record of the 130 declaration."""
        res = ''
        # Header
        res += "13001 "
        # Declaration type:
        # B (result to be deducted)
        # G (tax current account - payment)
        # I (payment)
        # N (negative)
        # U (payment by direct debit into a bank account)
        res += self._formatString(report.tipo_declaracion, 1)
        # Administration code - unused
        res += self._formatString("", 5)
        # Identification (1)
        res += self._formatString(report.company_vat, 9)  # Declarant's VAT number
        # Start of first surname
        res += self._formatString(report.company_id.name, 4)
        res += self._formatString(report.company_id.name, 30)  # Surnames
        res += self._formatString("", 15)  # First name
        res += self._formatNumber(report.fiscalyear_id.code, 4)  # Fiscal year
        res += self._formatString(report.period, 2)
        return res

    def _get_formatted_main_record(self, cr, uid, report, context=None):
        """Build the record with the numbered boxes ("casillas") 01-19."""
        res = ''
        # I. Economic activities, direct estimation - Computable income [01]
        res += self._formatNumber(report.casilla_01, 11, 2)
        # I. Economic activities, direct estimation - Tax-deductible expenses
        # [02]
        res += self._formatNumber(report.casilla_02, 11, 2)
        # I. Economic activities, direct estimation - Net profit [03]
        res += self._formatNumber(report.casilla_03, 11, 2)
        # I. Economic activities, direct estimation - 20% of box 03 [04]
        res += self._formatNumber(report.casilla_04, 11, 2)
        # I. Economic activities, direct estimation - To deduct - From
        # previous quarters [05]
        res += self._formatNumber(report.casilla_05, 11, 2)
        # I. Economic activities, direct estimation - To deduct - Withholdings
        # and payments on account [06]
        res += self._formatNumber(report.casilla_06, 11, 2)
        # I. Economic activities, direct estimation - Previous fractional
        # payment of the quarter [07]
        res += self._formatNumber(report.casilla_07, 11, 2)
        # II. Agricultural activities, direct estimation - Income volume [08]
        res += self._formatNumber(report.casilla_08, 11, 2)
        # II. Agricultural activities, direct estimation - 2% of box 08 [09]
        res += self._formatNumber(report.casilla_09, 11, 2)
        # II. Agricultural activities, direct estimation - To deduct -
        # Withholdings and payments on account [10]
        res += self._formatNumber(report.casilla_10, 11, 2)
        # II. Agricultural activities, direct estimation - Previous fractional
        # payment of the quarter [11]
        res += self._formatNumber(report.casilla_11, 11, 2)
        # III. Total settlement - Sum of previous fractional payments of the
        # quarter [12]
        res += self._formatNumber(report.casilla_12, 11, 2)
        # III. Total settlement - Reduction from applying the deduction of
        # article 80 bis [13]
        res += self._formatNumber(report.casilla_13, 11, 2)
        # III. Total settlement - Difference (12) - (13) [14]
        res += self._formatNumber(report.casilla_14, 11, 2)
        # III. Total settlement - To deduct - Negative results from previous
        # quarters [15]
        res += self._formatNumber(report.casilla_15, 11, 2)
        # III. Total settlement - Payments on loans for the purchase of the
        # main residence [16]
        res += self._formatNumber(report.casilla_16, 11, 2)
        # III. Total settlement - Total (14) - (15) [17]
        res += self._formatNumber(report.casilla_17, 11, 2)
        # III. Total settlement - To deduct - Result of previous
        # declarations [18]
        res += self._formatNumber(report.casilla_18, 11, 2)
        # III. Total settlement - Result of the declaration [19]
        res += self._formatNumber(report.result, 11, 2)
        return res

    def _get_formatted_other_records(self, cr, uid, report, context=None):
        """Build the trailing record: payment details, complementary
        declaration data, contact info and declaration date."""
        res = ''
        # Payment (4) - Payment amount (only when the result is positive)
        res += self._formatNumber(report.result if report.result > 0 else 0,
                                  11, 2)
        # Payment (4) - Payment method: "0" not stated, "1" cash,
        # "2" account debit, "3" direct debit
        res += self._formatString("0", 1)
        # Payment (4) - Bank account (entity/branch/check digits/number) -
        # unused
        res += self._formatString("", 20)
        # To deduct (5) - Declaration whose result is to be deducted in the
        # following fractional payments (never for the fourth quarter)
        res += self._formatBoolean(report.result < 0 and report.period != '4T',
                                   yes='X', no=' ')
        # Complementary (7) - Electronic code of the previous declaration
        res += self._formatString(report.previous_electronic_code if
                                  report.complementary else "", 16)
        # Complementary (7) - Receipt number of the previous declaration
        res += self._formatString(report.previous_declaration if
                                  report.complementary else "", 13)
        # Contact person
        res += self._formatString(report.company_id.name, 100)
        # Phone number (separators stripped to fit the numeric field)
        res += self._formatString(self._cleanString(report.company_id.phone),
                                  9)
        # Remarks
        res += self._formatString(report.comments, 350)
        # City
        res += self._formatString(report.company_id.partner_id.city, 16)
        date = datetime.strptime(report.calculation_date,
                                 DEFAULT_SERVER_DATETIME_FORMAT)
        # Date: day
        res += self._formatString(date.strftime("%d"), 2)
        # Date: month name, translated through gettext
        res += self._formatString(_(date.strftime("%B")), 10)
        # Date: year
        res += self._formatString(date.strftime("%Y"), 4)
        # CRLF record terminator. NOTE(review): str.encode here relies on
        # Python 2 str semantics (returns str, concatenable with res).
        res += "\r\n".encode("ascii")
        return res

    def _do_global_checks(self, report, contents, context=None):
        # The BOE record for form 130 has a fixed length of 880 characters.
        # NOTE(review): assert is stripped when running under `python -O`;
        # consider raising an explicit exception instead.
        assert len(contents) == 880, (
            "The 130 report must be 880 characters long and are %s" %
            len(contents)
        )
        return True
| agpl-3.0 |
gangadharkadam/verveerp | erpnext/hr/doctype/employee/employee.py | 4 | 7457 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, cint, today
from frappe.model.naming import make_autoname
from frappe import throw, _, msgprint
import frappe.permissions
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from erpnext.utilities.transaction_base import delete_events
class EmployeeUserDisabledError(frappe.ValidationError): pass
class Employee(Document):
    """HR Employee master record.

    Handles document naming (per HR Settings), validation of dates, emails,
    status and leave approvers, and keeps the linked User account and its
    user permissions in sync.
    """

    def onload(self):
        # Expose to the client whether an active Salary Structure already
        # exists for this employee.
        self.get("__onload").salary_structure_exists = frappe.db.get_value("Salary Structure",
            {"employee": self.name, "is_active": "Yes", "docstatus": ["!=", 2]})

    def autoname(self):
        """Name the document according to the scheme set in HR Settings."""
        naming_method = frappe.db.get_value("HR Settings", None, "emp_created_by")
        if not naming_method:
            throw(_("Please setup Employee Naming System in Human Resource > HR Settings"))
        else:
            if naming_method == 'Naming Series':
                self.name = make_autoname(self.naming_series + '.####')
            elif naming_method == 'Employee Number':
                self.name = self.employee_number

        # The employee field mirrors the document name.
        self.employee = self.name

    def validate(self):
        """Run all field-level validations before save."""
        from erpnext.utilities import validate_status
        validate_status(self.status, ["Active", "Left"])

        self.employee = self.name
        self.validate_date()
        self.validate_email()
        self.validate_status()
        self.validate_employee_leave_approver()
        self.validate_reports_to()

        if self.user_id:
            self.validate_for_enabled_user_id()
            self.validate_duplicate_user_id()

    def on_update(self):
        # Propagate employee details to the linked User account, if any.
        if self.user_id:
            self.update_user()
            self.update_user_permissions()

    def update_user_permissions(self):
        """Restrict the linked user to this Employee and its Company."""
        frappe.permissions.add_user_permission("Employee", self.name, self.user_id)
        frappe.permissions.set_user_permission_if_allowed("Company", self.company, self.user_id)

    def update_user(self):
        """Sync name, birth date, gender and image to the linked User."""
        # add employee role if missing
        user = frappe.get_doc("User", self.user_id)
        user.flags.ignore_permissions = True

        if "Employee" not in user.get("user_roles"):
            user.add_roles("Employee")

        # copy details like Fullname, DOB and Image to User
        if self.employee_name and not (user.first_name and user.last_name):
            employee_name = self.employee_name.split(" ")
            if len(employee_name) >= 3:
                user.last_name = " ".join(employee_name[2:])
                user.middle_name = employee_name[1]
            elif len(employee_name) == 2:
                user.last_name = employee_name[1]

            user.first_name = employee_name[0]

        if self.date_of_birth:
            user.birth_date = self.date_of_birth

        if self.gender:
            user.gender = self.gender

        if self.image:
            if not user.user_image:
                user.user_image = self.image
                try:
                    frappe.get_doc({
                        "doctype": "File Data",
                        "file_name": self.image,
                        "attached_to_doctype": "User",
                        "attached_to_name": self.user_id
                    }).insert()
                except frappe.DuplicateEntryError:
                    # already exists
                    pass

        user.save()

    def validate_date(self):
        """Ensure the chronological order of the employee's key dates."""
        if self.date_of_birth and self.date_of_joining and getdate(self.date_of_birth) >= getdate(self.date_of_joining):
            throw(_("Date of Joining must be greater than Date of Birth"))

        elif self.date_of_retirement and self.date_of_joining and (getdate(self.date_of_retirement) <= getdate(self.date_of_joining)):
            throw(_("Date Of Retirement must be greater than Date of Joining"))

        elif self.relieving_date and self.date_of_joining and (getdate(self.relieving_date) <= getdate(self.date_of_joining)):
            throw(_("Relieving Date must be greater than Date of Joining"))

        elif self.contract_end_date and self.date_of_joining and (getdate(self.contract_end_date) <= getdate(self.date_of_joining)):
            throw(_("Contract End Date must be greater than Date of Joining"))

    def validate_email(self):
        """Validate the format of the company and personal email fields."""
        if self.company_email and not validate_email_add(self.company_email):
            throw(_("Please enter valid Company Email"))
        if self.personal_email and not validate_email_add(self.personal_email):
            throw(_("Please enter valid Personal Email"))

    def validate_status(self):
        # A relieving date is mandatory once the employee has left.
        if self.status == 'Left' and not self.relieving_date:
            throw(_("Please enter relieving date."))

    def validate_for_enabled_user_id(self):
        """An Active employee may only be linked to an enabled User."""
        if not self.status == 'Active':
            return
        enabled = frappe.db.sql("""select name from `tabUser` where
            name=%s and enabled=1""", self.user_id)
        if not enabled:
            throw(_("User {0} is disabled").format(self.user_id), EmployeeUserDisabledError)

    def validate_duplicate_user_id(self):
        """A User may only be linked to one Active Employee."""
        employee = frappe.db.sql_list("""select name from `tabEmployee` where
            user_id=%s and status='Active' and name!=%s""", (self.user_id, self.name))
        if employee:
            throw(_("User {0} is already assigned to Employee {1}").format(self.user_id, employee[0]))

    def validate_employee_leave_approver(self):
        # Iterate over a copy ([:]) because rows may be removed while looping.
        for l in self.get("leave_approvers")[:]:
            if "Leave Approver" not in frappe.get_roles(l.leave_approver):
                self.get("leave_approvers").remove(l)
                msgprint(_("{0} is not a valid Leave Approver. Removing row #{1}.").format(l.leave_approver, l.idx))

    def validate_reports_to(self):
        # Prevent a self-referential reporting loop.
        if self.reports_to == self.name:
            throw(_("Employee cannot report to himself."))

    def on_trash(self):
        # Remove calendar events (birthdays, etc.) linked to this employee.
        delete_events(self.doctype, self.name)
# Default retirement age expressed in days: 60 years x 365.25 days = 21915.
RETIREMENT_AGE_DAYS = 21915


@frappe.whitelist()
def get_retirement_date(date_of_birth=None):
    """Return the computed retirement date (date of birth + 60 years).

    Args:
        date_of_birth: a date value parseable by frappe's getdate().

    Returns:
        dict with key 'date_of_retirement' formatted '%Y-%m-%d', or an
        empty dict when no date_of_birth is supplied.
    """
    import datetime
    ret = {}
    if date_of_birth:
        # The original hard-coded the magic number 21915 inline.
        dt = getdate(date_of_birth) + datetime.timedelta(days=RETIREMENT_AGE_DAYS)
        ret = {'date_of_retirement': dt.strftime('%Y-%m-%d')}
    return ret
@frappe.whitelist()
def make_salary_structure(source_name, target=None):
    """Create a new (unsaved) Salary Structure mapped from an Employee.

    Args:
        source_name: name of the source Employee document.
        target: unused; kept for the whitelisted mapper signature.

    Returns:
        The mapped Salary Structure document with the earnings/deductions
        table pre-populated.
    """
    target = get_mapped_doc("Employee", source_name, {
        "Employee": {
            "doctype": "Salary Structure",
            "field_map": {
                "name": "employee"
            }
        }
    })
    target.make_earn_ded_table()
    return target
def validate_employee_role(doc, method):
    # called via User hook
    # Disallow granting the Employee role to a User with no Employee record.
    if "Employee" in [d.role for d in doc.get("user_roles")]:
        if not frappe.db.get_value("Employee", {"user_id": doc.name}):
            frappe.msgprint(_("Please set User ID field in an Employee record to set Employee Role"))
            doc.get("user_roles").remove(doc.get("user_roles", {"role": "Employee"})[0])
def update_user_permissions(doc, method):
    # called via User hook
    # Re-apply employee/company user permissions whenever the User changes.
    if "Employee" in [d.role for d in doc.get("user_roles")]:
        employee = frappe.get_doc("Employee", {"user_id": doc.name})
        employee.update_user_permissions()
def send_birthday_reminders():
    """Send Employee birthday reminders unless 'Stop Birthday Reminders' is
    set in HR Settings.

    Mails every enabled system user except the celebrant, replying to the
    celebrant's own address.
    """
    if int(frappe.db.get_single_value("HR Settings", "stop_birthday_reminders") or 0):
        return

    from frappe.utils.user import get_enabled_system_users

    birthdays = get_employees_who_are_born_today()
    if not birthdays:
        return

    # The original initialized `users = None` and re-checked `if not users`
    # inside the birthday branch — a pointless indirection; build the
    # recipient list once.
    users = [u.email_id or u.name for u in get_enabled_system_users()]
    for e in birthdays:
        recipients = [u for u in users
                      if u not in (e.company_email, e.personal_email)]
        frappe.sendmail(recipients=recipients,
                        subject=_("Birthday Reminder for {0}").format(e.employee_name),
                        message=_("""Today is {0}'s birthday!""").format(e.employee_name),
                        reply_to=e.company_email or e.personal_email,
                        bulk=True)
def get_employees_who_are_born_today():
    """Get Employee properties whose birthday is today.

    Returns a list of dicts with name, personal_email, company_email and
    employee_name for every Active employee born on today's day/month.
    """
    return frappe.db.sql("""select name, personal_email, company_email, employee_name
        from tabEmployee where day(date_of_birth) = day(%(date)s)
        and month(date_of_birth) = month(%(date)s)
        and status = 'Active'""", {"date": today()}, as_dict=True)
| agpl-3.0 |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part01-e16_transform/tmc/utils.py | 91 | 2610 | import importlib
import sys
from unittest.mock import MagicMock
def load(pkg, method, err=None):
    """Return attribute *method* of module *pkg*.

    If the import or lookup fails for any reason, return a stand-in
    callable that raises AssertionError with *err* (or a default message)
    when invoked.
    """
    message = err if err else '{0}.{1} does not exist!'.format(pkg, method)

    def fail(*args, **kwargs):
        raise AssertionError(message)

    try:
        module = importlib.import_module(pkg)
        return getattr(module, method)
    except Exception:
        return fail
def get_out():
    """Return captured stdout contents, stripped of surrounding whitespace."""
    captured = sys.stdout.getvalue()
    return captured.strip()
def get_err():
    """Return captured stderr contents, stripped of surrounding whitespace."""
    captured = sys.stderr.getvalue()
    return captured.strip()
def any_contains(needle, haystack):
    """Return True iff *needle* is a substring of any element of *haystack*.

    Returns False for an empty *haystack*.
    """
    # Bug fix: the original computed any(...) but never returned it, so the
    # function always returned None.
    return any(needle in item for item in haystack)
# This solution to wrap a patched method comes originally from
# https://stackoverflow.com/questions/25608107/python-mock-patching-a-method-without-obstructing-implementation
def spy_decorator(method_to_decorate, name):
    """Wrap *method_to_decorate* so each call is recorded on a MagicMock.

    The mock is exposed as ``wrapper.mock`` for call assertions, while the
    original method still runs and its return value is passed through.
    """
    call_recorder = MagicMock(name="%s method" % name)

    def wrapper(self, *args, **kwargs):
        call_recorder(*args, **kwargs)
        return method_to_decorate(self, *args, **kwargs)

    wrapper.mock = call_recorder
    return wrapper
class patch_helper(object):
    """Resolve dotted attribute paths against a base module for mock.patch.

    Given a dotted name like 'package.subpackage.attribute', __call__
    figures out how many trailing components are attributes reachable from
    the base module and returns the fully-qualified patch target string.
    """

    def __init__(self, module_name):
        import importlib
        # Base module against which dotted names are resolved.
        self.m = module_name

    def __call__(self, d):
        #import importlib
        parts = d.split(".")
        # If e.g. d == package.subpackage.subpackage2.attribute,
        # and our module is called mystery_data.
        # Probe progressively longer attribute suffixes: first assume only
        # the last component is an attribute of the base module, then the
        # last two, and so on (up to four components).
        try:
            getattr(importlib.import_module(self.m), parts[-1])   # attribute
            p = ".".join([self.m, parts[-1]])
            # p='src.mystery_data.attribute'
        except ModuleNotFoundError:
            raise
        except AttributeError:
            if len(parts) == 1:
                raise
            try:
                getattr(importlib.import_module(self.m), parts[-2])   # subpackage2.attribute
                p = ".".join([self.m] + parts[-2:])
                # p='src.mystery_data.subpackage2.attribute'
            except AttributeError:
                if len(parts) == 2:
                    raise
                try:
                    getattr(importlib.import_module(self.m), parts[-3])  # subpackage.subpackage2.attribute
                    p = ".".join([self.m] + parts[-3:])
                    # p='src.mystery_date.subpackage.subpackage2.attribute'
                except AttributeError:
                    if len(parts) == 3:
                        raise
                    # package.subpackage.subpackage2.attribute
                    getattr(importlib.import_module(self.m), parts[-4])
                    p = ".".join([self.m] + parts[-4:])
                    # p='src.mystery_date.package.subpackage.subpackage2.attribute'
        return p
| gpl-3.0 |
petrutlucian94/cinder | cinder/volume/drivers/srb.py | 14 | 33627 | # Copyright (c) 2014 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for the Scality REST Block storage system
This driver provisions Linux SRB volumes leveraging RESTful storage platforms
(e.g. Scality CDMI).
"""
import contextlib
import functools
import re
import sys
import time
from oslo_concurrency import lockutils
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm
from cinder import exception
from cinder.i18n import _, _LI, _LE, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)

# Oslo config options registered by this driver.
srb_opts = [
    cfg.StrOpt('srb_base_urls',
               default=None,
               help='Comma-separated list of REST servers IP to connect to. '
                    '(eg http://IP1/,http://IP2:81/path'),
]

CONF = cfg.CONF
CONF.register_opts(srb_opts)

# Matches "http://<dotted-quad>[:port]/<path>" URLs accepted as REST servers.
ACCEPTED_REST_SERVER = re.compile(r'^http://'
                                  '(\d{1,3}\.){3}\d{1,3}'
                                  '(:\d+)?/[a-zA-Z0-9\-_\/]*$')
class retry(object):
    """Decorator that retries the wrapped callable on given exceptions.

    ``sleep_mechanism`` controls the delay between attempts: none,
    incrementing by ``sleep_factor`` each time, or doubling. The last
    caught exception is re-raised once all attempts are exhausted.
    """

    SLEEP_NONE = 'none'
    SLEEP_DOUBLE = 'double'
    SLEEP_INCREMENT = 'increment'

    def __init__(self, exceptions, count, sleep_mechanism=SLEEP_INCREMENT,
                 sleep_factor=1):
        if sleep_mechanism not in [self.SLEEP_NONE,
                                   self.SLEEP_DOUBLE,
                                   self.SLEEP_INCREMENT]:
            raise ValueError('Invalid value for `sleep_mechanism` argument')

        self._exceptions = exceptions
        self._count = count
        self._sleep_mechanism = sleep_mechanism
        self._sleep_factor = sleep_factor

    def __call__(self, fun):
        # Bug fix: `fun.func_name` exists only on Python 2 functions;
        # `fun.__name__` works on both Python 2 and Python 3 (this file
        # already targets both, cf. the `six` imports).
        func_name = fun.__name__

        @functools.wraps(fun)
        def wrapped(*args, **kwargs):
            sleep_time = self._sleep_factor
            exc_info = None

            for attempt in range(self._count):
                if attempt != 0:
                    LOG.warning(_LW('Retrying failed call to %(func)s, '
                                    'attempt %(attempt)i.'),
                                {'func': func_name,
                                 'attempt': attempt})
                try:
                    return fun(*args, **kwargs)
                except self._exceptions:
                    exc_info = sys.exc_info()

                if attempt != self._count - 1:
                    if self._sleep_mechanism == self.SLEEP_NONE:
                        continue
                    elif self._sleep_mechanism == self.SLEEP_INCREMENT:
                        time.sleep(sleep_time)
                        sleep_time += self._sleep_factor
                    elif self._sleep_mechanism == self.SLEEP_DOUBLE:
                        time.sleep(sleep_time)
                        sleep_time *= 2
                    else:
                        raise ValueError('Unknown sleep mechanism: %r'
                                         % self._sleep_mechanism)

            # All attempts failed: re-raise the last captured exception.
            six.reraise(exc_info[0], exc_info[1], exc_info[2])

        return wrapped
class LVM(lvm.LVM):
    """Brick LVM wrapper adding VG activation/teardown helpers for SRB.

    All operations shell out through rootwrap; failures are logged with the
    full command, stdout and stderr before the exception is re-raised.
    """

    def _run_as_root(self, cmd, error_msg):
        """Run *cmd* via rootwrap, logging details on failure.

        The five public methods below previously each duplicated this
        try/except/log boilerplate; it is factored out here once.

        :raises: putils.ProcessExecutionError
        """
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(error_msg)
            LOG.error(_LE('Cmd :%s'), err.cmd)
            LOG.error(_LE('StdOut :%s'), err.stdout)
            LOG.error(_LE('StdErr :%s'), err.stderr)
            raise

    def activate_vg(self):
        """Activate the Volume Group associated with this instantiation.

        :raises: putils.ProcessExecutionError
        """
        self._run_as_root(['vgchange', '-ay', self.vg_name],
                          _LE('Error activating Volume Group'))

    def deactivate_vg(self):
        """Deactivate the Volume Group associated with this instantiation.

        This forces LVM to release any reference to the device.

        :raises: putils.ProcessExecutionError
        """
        self._run_as_root(['vgchange', '-an', self.vg_name],
                          _LE('Error deactivating Volume Group'))

    def destroy_vg(self):
        """Destroy the Volume Group associated with this instantiation.

        :raises: putils.ProcessExecutionError
        """
        self._run_as_root(['vgremove', '-f', self.vg_name],
                          _LE('Error destroying Volume Group'))

    def pv_resize(self, pv_name, new_size_str):
        """Extend the size of an existing PV (for virtual PVs).

        :raises: putils.ProcessExecutionError
        """
        self._run_as_root(['pvresize',
                           '--setphysicalvolumesize', new_size_str,
                           pv_name],
                          _LE('Error resizing Physical Volume'))

    def extend_thin_pool(self):
        """Extend the size of the thin provisioning pool.

        This method extends the size of a thin provisioning pool to 95% of
        the size of the VG, if the VG is configured as thin and owns a thin
        provisioning pool.

        :raises: putils.ProcessExecutionError
        """
        if self.vg_thin_pool is None:
            return

        new_size_str = self._calculate_thin_pool_size()
        self._run_as_root(['lvextend',
                           '-L', new_size_str,
                           "%s/%s-pool" % (self.vg_name, self.vg_name)],
                          _LE('Error extending thin provisioning pool'))
@contextlib.contextmanager
def patched(obj, attr, fun):
    """Context manager to locally patch a method.

    Within the managed context, the `attr` method of `obj` is replaced by a
    method which calls `fun` passing in the original `attr` attribute of
    `obj` as well as any positional and keyword arguments.

    At the end of the context, the original method is restored.
    """
    original = getattr(obj, attr)

    def _shim(*args, **kwargs):
        return fun(original, *args, **kwargs)

    setattr(obj, attr, _shim)
    try:
        yield
    finally:
        setattr(obj, attr, original)
@contextlib.contextmanager
def handle_process_execution_error(message, info_message, reraise=True):
    """Consistently handle `putils.ProcessExecutionError` exceptions

    This context-manager will catch any `putils.ProcessExecutionError`
    exceptions raised in the managed block, and generate logging output
    accordingly.

    The value of the `message` argument will be logged at `logging.ERROR`
    level, and the `info_message` argument at `logging.INFO` level. Finally
    the command string, exit code, standard output and error output of the
    process will be logged at `logging.DEBUG` level.

    The `reraise` argument specifies what should happen when a
    `putils.ProcessExecutionError` is caught. If it's equal to `True`, the
    exception will be re-raised. If it's some other non-`False` object, this
    object will be raised instead (so you most likely want it to be some
    `Exception`). Any `False` value will result in the exception to be
    swallowed.
    """
    try:
        yield
    except putils.ProcessExecutionError as exc:
        LOG.error(message)
        LOG.info(info_message)
        LOG.debug('Command : %s', exc.cmd)
        LOG.debug('Exit Code : %r', exc.exit_code)
        LOG.debug('StdOut : %s', exc.stdout)
        LOG.debug('StdErr : %s', exc.stderr)
        # `reraise is True` (identity) distinguishes "re-raise the original"
        # from "raise this replacement object" for any other truthy value.
        if reraise is True:
            raise
        elif reraise:
            raise reraise  # pylint: disable=E0702
@contextlib.contextmanager
def temp_snapshot(driver, volume, src_vref):
    """Yield a short-lived snapshot of *src_vref*, deleted on context exit.

    The snapshot id/name embed *volume*'s id so concurrent clones of
    different destination volumes do not collide.
    """
    size = src_vref['size']
    snapshot = {
        'volume_name': src_vref['name'],
        'volume_id': src_vref['id'],
        'volume_size': size,
        'name': 'snapshot-clone-%s' % volume['id'],
        'id': 'tmp-snap-%s' % volume['id'],
        'size': size,
    }

    driver.create_snapshot(snapshot)
    try:
        yield snapshot
    finally:
        driver.delete_snapshot(snapshot)
@contextlib.contextmanager
def temp_raw_device(driver, volume):
    # Attach the volume's backing file for the duration of the context and
    # always detach it on the way out, even if the body raises.
    driver._attach_file(volume)
    try:
        yield
    finally:
        driver._detach_file(volume)
@contextlib.contextmanager
def temp_lvm_device(driver, volume):
    # Attach the SRB backing file, then activate its LVM volume group and
    # yield the VG handle; the file is detached when temp_raw_device unwinds.
    # NOTE(review): the VG is not explicitly deactivated before the detach —
    # confirm the detach path tolerates an active VG.
    with temp_raw_device(driver, volume):
        vg = driver._get_lvm_vg(volume)
        vg.activate_vg()
        yield vg
class SRBDriver(driver.VolumeDriver):
    """Scality SRB volume driver
    This driver manages volumes provisioned by the Scality REST Block driver
    Linux kernel module, backed by RESTful storage providers (e.g. Scality
    CDMI).
    """
    VERSION = '1.1.0'
    # Over-allocation ratio (multiplied with requested size) for thin
    # provisioning
    OVER_ALLOC_RATIO = 2
    # LVM reserves names starting with this prefix; see _escape_snapshot().
    SNAPSHOT_PREFIX = 'snapshot'
    def __init__(self, *args, **kwargs):
        """Set up in-memory driver state; no I/O happens until do_setup()."""
        super(SRBDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(srb_opts)
        self.urls_setup = False
        self.backend_name = None
        self.base_urls = None
        self.root_helper = utils.get_root_helper()
        # Maps volume id -> attach refcount; only mutated under the
        # 'devices' synchronized lock (see the _*_attached_count helpers).
        self._attached_devices = {}
    def _setup_urls(self):
        """Register the configured REST urls with the SRB kernel module."""
        if not self.base_urls:
            message = _("No url configured")
            raise exception.VolumeBackendAPIException(data=message)
        # NOTE(review): 'Cound' looks like a typo for 'Could'; the message
        # text is left unchanged here.
        with handle_process_execution_error(
                message=_LE('Cound not setup urls on the Block Driver.'),
                info_message=_LI('Error creating Volume'),
                reraise=False):
            cmd = self.base_urls
            path = '/sys/class/srb/add_urls'
            putils.execute('tee', path, process_input=cmd,
                           root_helper=self.root_helper, run_as_root=True)
            # Only reached if the write succeeded (errors are swallowed
            # above); check_for_setup_error() relies on this flag.
            self.urls_setup = True
    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        self.backend_name = self.configuration.safe_get('volume_backend_name')
        base_urls = self.configuration.safe_get('srb_base_urls')
        sane_urls = []
        if base_urls:
            # Keep only urls matching the accepted REST server pattern.
            for url in base_urls.split(','):
                stripped_url = url.strip()
                if ACCEPTED_REST_SERVER.match(stripped_url):
                    sane_urls.append(stripped_url)
                else:
                    LOG.warning(_LW("%s is not an accepted REST server "
                                    "IP address"), stripped_url)
        self.base_urls = ','.join(sane_urls)
        self._setup_urls()
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        if not self.base_urls:
            LOG.warning(_LW("Configuration variable srb_base_urls"
                            " not set or empty."))
        if self.urls_setup is False:
            message = _("Could not setup urls properly")
            raise exception.VolumeBackendAPIException(data=message)
    @classmethod
    def _is_snapshot(cls, volume):
        # Snapshots are recognized purely by their name prefix.
        return volume['name'].startswith(cls.SNAPSHOT_PREFIX)
    @classmethod
    def _get_volname(cls, volume):
        """Returns the name of the actual volume
        If the volume is a snapshot, it returns the name of the parent volume.
        otherwise, returns the volume's name.
        """
        name = volume['name']
        if cls._is_snapshot(volume):
            name = "volume-%s" % (volume['volume_id'])
        return name
    @classmethod
    def _get_volid(cls, volume):
        """Returns the ID of the actual volume
        If the volume is a snapshot, it returns the ID of the parent volume.
        otherwise, returns the volume's id.
        """
        volid = volume['id']
        if cls._is_snapshot(volume):
            volid = volume['volume_id']
        return volid
    @classmethod
    def _device_name(cls, volume):
        """Return the (truncated) kernel device name for the volume."""
        volume_id = cls._get_volid(volume)
        name = 'cinder-%s' % volume_id
        # Device names can't be longer than 32 bytes (incl. \0)
        return name[:31]
    @classmethod
    def _device_path(cls, volume):
        """Return the /dev path of the raw SRB device for the volume."""
        return "/dev/" + cls._device_name(volume)
    @classmethod
    def _escape_snapshot(cls, snapshot_name):
        # Linux LVM reserves name that starts with snapshot, so that
        # such volume name can't be created. Mangle it.
        if not snapshot_name.startswith(cls.SNAPSHOT_PREFIX):
            return snapshot_name
        return '_' + snapshot_name
    @classmethod
    def _mapper_path(cls, volume):
        """Return the /dev/mapper path of the LV backing the volume."""
        groupname = cls._get_volname(volume)
        name = volume['name']
        if cls._is_snapshot(volume):
            name = cls._escape_snapshot(name)
        # NOTE(vish): stops deprecation warning
        groupname = groupname.replace('-', '--')
        name = name.replace('-', '--')
        return "/dev/mapper/%s-%s" % (groupname, name)
    @staticmethod
    def _size_int(size_in_g):
        """Parse a size in GiB into an int, with a minimum of 1 GiB.

        Raises VolumeBackendAPIException on non-integer input.
        """
        try:
            return max(int(size_in_g), 1)
        except ValueError:
            message = (_("Invalid size parameter '%s': Cannot be interpreted"
                         " as an integer value.")
                       % size_in_g)
            LOG.error(message)
            raise exception.VolumeBackendAPIException(data=message)
    @classmethod
    def _set_device_path(cls, volume):
        """Record the backing volume name as provider_location.

        Returns the model-update dict expected by the volume manager.
        """
        volume['provider_location'] = cls._get_volname(volume)
        return {
            'provider_location': volume['provider_location'],
        }
    @staticmethod
    def _activate_lv(orig, *args, **kwargs):
        """Activate lv.
        Use with `patched` to patch `lvm.LVM.activate_lv` to ignore `EEXIST`
        """
        try:
            orig(*args, **kwargs)
        except putils.ProcessExecutionError as exc:
            # Exit code 5 is assumed here to mean "already active";
            # anything else is a real failure and is propagated.
            if exc.exit_code != 5:
                raise
            else:
                LOG.debug('`activate_lv` returned 5, ignored')
    def _get_lvm_vg(self, volume, create_vg=False):
        """Return the LVM handle for the volume's one-device VG.

        `activate_lv` is patched so that re-activation of an already
        active LV is tolerated.
        """
        # NOTE(joachim): One-device volume group to manage thin snapshots
        # Get origin volume name even for snapshots
        volume_name = self._get_volname(volume)
        physical_volumes = [self._device_path(volume)]
        with patched(lvm.LVM, 'activate_lv', self._activate_lv):
            return LVM(volume_name, utils.get_root_helper(),
                       create_vg=create_vg,
                       physical_volumes=physical_volumes,
                       lvm_type='thin', executor=self._execute)
    @staticmethod
    def _volume_not_present(vg, volume_name):
        # Used to avoid failing to delete a volume for which
        # the create operation partly failed
        return vg.get_volume(volume_name) is None
    def _create_file(self, volume):
        """Create the backing file on the REST storage; returns model update.

        The requested size is multiplied by OVER_ALLOC_RATIO to leave room
        for thin snapshots.
        """
        message = _('Could not create volume on any configured REST server.')
        with handle_process_execution_error(
                message=message,
                info_message=_LI('Error creating Volume %s.') % volume['name'],
                reraise=exception.VolumeBackendAPIException(data=message)):
            size = self._size_int(volume['size']) * self.OVER_ALLOC_RATIO
            cmd = volume['name']
            cmd += ' %dG' % size
            path = '/sys/class/srb/create'
            putils.execute('tee', path, process_input=cmd,
                           root_helper=self.root_helper, run_as_root=True)
        return self._set_device_path(volume)
    def _extend_file(self, volume, new_size):
        """Grow the backing file to new_size (GiB, over-allocated)."""
        message = _('Could not extend volume on any configured REST server.')
        with handle_process_execution_error(
                message=message,
                info_message=(_LI('Error extending Volume %s.')
                              % volume['name']),
                reraise=exception.VolumeBackendAPIException(data=message)):
            size = self._size_int(new_size) * self.OVER_ALLOC_RATIO
            cmd = volume['name']
            cmd += ' %dG' % size
            path = '/sys/class/srb/extend'
            putils.execute('tee', path, process_input=cmd,
                           root_helper=self.root_helper, run_as_root=True)
    @staticmethod
    def _destroy_file(volume):
        """Delete the backing file on the REST storage."""
        message = _('Could not destroy volume on any configured REST server.')
        volname = volume['name']
        with handle_process_execution_error(
                message=message,
                info_message=_LI('Error destroying Volume %s.') % volname,
                reraise=exception.VolumeBackendAPIException(data=message)):
            cmd = volume['name']
            path = '/sys/class/srb/destroy'
            # Static method: no access to self.root_helper, so fetch it here.
            putils.execute('tee', path, process_input=cmd,
                           root_helper=utils.get_root_helper(),
                           run_as_root=True)
    # NOTE(joachim): Must only be called within a function decorated by:
    # @lockutils.synchronized('devices', 'cinder-srb-')
    def _increment_attached_count(self, volume):
        """Increments the attach count of the device"""
        volid = self._get_volid(volume)
        if volid not in self._attached_devices:
            self._attached_devices[volid] = 1
        else:
            self._attached_devices[volid] += 1
    # NOTE(joachim): Must only be called within a function decorated by:
    # @lockutils.synchronized('devices', 'cinder-srb-')
    def _decrement_attached_count(self, volume):
        """Decrements the attach count of the device"""
        volid = self._get_volid(volume)
        if volid not in self._attached_devices:
            raise exception.VolumeBackendAPIException(
                (_("Internal error in srb driver: "
                   "Trying to detach detached volume %s."))
                % (self._get_volname(volume))
            )
        self._attached_devices[volid] -= 1
        # Drop the entry entirely once the last user detached.
        if self._attached_devices[volid] == 0:
            del self._attached_devices[volid]
    # NOTE(joachim): Must only be called within a function decorated by:
    # @lockutils.synchronized('devices', 'cinder-srb-')
    def _get_attached_count(self, volume):
        """Return the current attach refcount of the device (0 if absent)."""
        volid = self._get_volid(volume)
        return self._attached_devices.get(volid, 0)
    @lockutils.synchronized('devices', 'cinder-srb-')
    def _is_attached(self, volume):
        """Return True if the volume's device currently has any attachment."""
        return self._get_attached_count(volume) > 0
    @lockutils.synchronized('devices', 'cinder-srb-')
    def _attach_file(self, volume):
        """Attach the volume's raw device, refcounted.

        Only the first attach per device actually talks to the kernel
        module; later calls just bump the refcount.
        """
        name = self._get_volname(volume)
        devname = self._device_name(volume)
        LOG.debug('Attaching volume %(name)s as %(devname)s',
                  {'name': name, 'devname': devname})
        count = self._get_attached_count(volume)
        if count == 0:
            message = (_('Could not attach volume %(vol)s as %(dev)s '
                         'on system.')
                       % {'vol': name, 'dev': devname})
            with handle_process_execution_error(
                    message=message,
                    info_message=_LI('Error attaching Volume'),
                    reraise=exception.VolumeBackendAPIException(data=message)):
                cmd = name + ' ' + devname
                path = '/sys/class/srb/attach'
                putils.execute('tee', path, process_input=cmd,
                               root_helper=self.root_helper, run_as_root=True)
        else:
            LOG.debug('Volume %s already attached', name)
        self._increment_attached_count(volume)
    @retry(exceptions=(putils.ProcessExecutionError, ),
           count=3, sleep_mechanism=retry.SLEEP_INCREMENT, sleep_factor=5)
    def _do_deactivate(self, volume, vg):
        """Deactivate the VG, retried up to 3 times with increasing sleeps."""
        vg.deactivate_vg()
    @retry(exceptions=(putils.ProcessExecutionError, ),
           count=5, sleep_mechanism=retry.SLEEP_DOUBLE, sleep_factor=1)
    def _do_detach(self, volume, vg):
        """Detach the raw device, retried with exponential backoff.

        On failure, try to re-activate then cleanly deactivate the VG so
        the next retry starts from a known state; the original error is
        re-raised regardless.
        """
        devname = self._device_name(volume)
        volname = self._get_volname(volume)
        cmd = devname
        path = '/sys/class/srb/detach'
        try:
            putils.execute('tee', path, process_input=cmd,
                           root_helper=self.root_helper, run_as_root=True)
        except putils.ProcessExecutionError:
            with excutils.save_and_reraise_exception(reraise=True):
                try:
                    with patched(lvm.LVM, 'activate_lv', self._activate_lv):
                        vg.activate_lv(volname)
                    self._do_deactivate(volume, vg)
                except putils.ProcessExecutionError:
                    LOG.warning(_LW('All attempts to recover failed detach '
                                    'of %(volume)s failed.'),
                                {'volume': volname})
    @lockutils.synchronized('devices', 'cinder-srb-')
    def _detach_file(self, volume):
        """Detach the volume's raw device once no other user remains."""
        name = self._get_volname(volume)
        devname = self._device_name(volume)
        vg = self._get_lvm_vg(volume)
        LOG.debug('Detaching device %s', devname)
        count = self._get_attached_count(volume)
        if count > 1:
            # NOTE(review): this early return does NOT decrement the attach
            # refcount, so a device attached more than once appears to keep
            # its extra references forever — verify this is intentional.
            LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
                         'not detaching.'),
                     {'volume': volume['name'], 'count': count})
            return
        message = (_('Could not detach volume %(vol)s from device %(dev)s.')
                   % {'vol': name, 'dev': devname})
        with handle_process_execution_error(
                message=message,
                info_message=_LI('Error detaching Volume'),
                reraise=exception.VolumeBackendAPIException(data=message)):
            try:
                if vg is not None:
                    self._do_deactivate(volume, vg)
            except putils.ProcessExecutionError:
                LOG.error(_LE('Could not deactivate volume group %s'),
                          self._get_volname(volume))
                raise
            try:
                self._do_detach(volume, vg=vg)
            except putils.ProcessExecutionError:
                LOG.error(_LE('Could not detach volume %(vol)s from device '
                              '%(dev)s.'), {'vol': name, 'dev': devname})
                raise
            self._decrement_attached_count(volume)
    def _setup_lvm(self, volume):
        """Create the one-device VG and a thin LV for the volume."""
        # NOTE(joachim): One-device volume group to manage thin snapshots
        size = self._size_int(volume['size']) * self.OVER_ALLOC_RATIO
        size_str = '%dg' % size
        vg = self._get_lvm_vg(volume, create_vg=True)
        vg.create_volume(volume['name'], size_str, lv_type='thin')
    def _destroy_lvm(self, volume):
        """Tear down the volume's VG; refuses if snapshots still exist."""
        vg = self._get_lvm_vg(volume)
        if vg.lv_has_snapshot(volume['name']):
            LOG.error(_LE('Unable to delete due to existing snapshot '
                          'for volume: %s.'),
                      volume['name'])
            raise exception.VolumeIsBusy(volume_name=volume['name'])
        vg.destroy_vg()
        # NOTE(joachim) Force lvm vg flush through a vgs command
        vgs = vg.get_all_volume_groups(root_helper=self.root_helper,
                                       vg_name=vg.vg_name)
        if len(vgs) != 0:
            LOG.warning(_LW('Removed volume group %s still appears in vgs.'),
                        vg.vg_name)
    def _create_and_copy_volume(self, dstvol, srcvol):
        """Creates a volume from a volume or a snapshot."""
        updates = self._create_file(dstvol)
        # We need devices attached for IO operations.
        with temp_lvm_device(self, srcvol) as vg, \
                temp_raw_device(self, dstvol):
            self._setup_lvm(dstvol)
            # Some configurations of LVM do not automatically activate
            # ThinLVM snapshot LVs.
            with patched(lvm.LVM, 'activate_lv', self._activate_lv):
                vg.activate_lv(srcvol['name'], True)
            # copy_volume expects sizes in MiB, we store integer GiB
            # be sure to convert before passing in
            volutils.copy_volume(self._mapper_path(srcvol),
                                 self._mapper_path(dstvol),
                                 srcvol['volume_size'] * units.Ki,
                                 self.configuration.volume_dd_blocksize,
                                 execute=self._execute)
        return updates
    def create_volume(self, volume):
        """Creates a volume.
        Can optionally return a Dictionary of changes to the volume object to
        be persisted.
        """
        updates = self._create_file(volume)
        # We need devices attached for LVM operations.
        with temp_raw_device(self, volume):
            self._setup_lvm(volume)
        return updates
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        return self._create_and_copy_volume(volume, snapshot)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
        updates = None
        # Clone via a temporary snapshot of the source, which is removed
        # automatically once the copy completed.
        with temp_lvm_device(self, src_vref):
            with temp_snapshot(self, volume, src_vref) as snapshot:
                updates = self._create_and_copy_volume(volume, snapshot)
        return updates
    def delete_volume(self, volume):
        """Deletes a volume."""
        attached = False
        if self._is_attached(volume):
            attached = True
            with temp_lvm_device(self, volume):
                self._destroy_lvm(volume)
            self._detach_file(volume)
        LOG.debug('Deleting volume %(volume_name)s, attached=%(attached)s',
                  {'volume_name': volume['name'], 'attached': attached})
        self._destroy_file(volume)
    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        with temp_lvm_device(self, snapshot) as vg:
            # NOTE(joachim) we only want to support thin lvm_types
            vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
                                  snapshot['volume_name'],
                                  lv_type='thin')
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        with temp_lvm_device(self, snapshot) as vg:
            if self._volume_not_present(
                    vg, self._escape_snapshot(snapshot['name'])):
                # If the snapshot isn't present, then don't attempt to delete
                LOG.warning(_LW("snapshot: %s not found, "
                                "skipping delete operations"),
                            snapshot['name'])
                return
            vg.delete(self._escape_snapshot(snapshot['name']))
    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service."""
        # Capacities are reported as 'infinite': the REST backend has no
        # queryable size from here.
        stats = {
            'vendor_name': 'Scality',
            'driver_version': self.VERSION,
            'storage_protocol': 'Scality Rest Block Device',
            'total_capacity_gb': 'infinite',
            'free_capacity_gb': 'infinite',
            'reserved_percentage': 0,
            'volume_backend_name': self.backend_name,
        }
        return stats
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        with temp_lvm_device(self, volume):
            image_utils.fetch_to_volume_format(context,
                                               image_service,
                                               image_id,
                                               self._mapper_path(volume),
                                               'qcow2',
                                               self.configuration.
                                               volume_dd_blocksize,
                                               size=volume['size'])
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        with temp_lvm_device(self, volume):
            image_utils.upload_volume(context,
                                      image_service,
                                      image_meta,
                                      self._mapper_path(volume))
    def extend_volume(self, volume, new_size):
        """Grow the backing file, then the PV, thin pool and LV to match."""
        new_alloc_size = self._size_int(new_size) * self.OVER_ALLOC_RATIO
        new_size_str = '%dg' % new_alloc_size
        self._extend_file(volume, new_size)
        with temp_lvm_device(self, volume) as vg:
            vg.pv_resize(self._device_path(volume), new_size_str)
            vg.extend_thin_pool()
            vg.extend_volume(volume['name'], new_size_str)
class SRBISCSIDriver(SRBDriver, driver.ISCSIDriver):
    """Scality SRB volume driver with ISCSI support
    This driver manages volumes provisioned by the Scality REST Block driver
    Linux kernel module, backed by RESTful storage providers (e.g. Scality
    CDMI), and exports them through ISCSI to Nova.
    """
    VERSION = '1.0.0'
    def __init__(self, *args, **kwargs):
        """Pick the iSCSI target driver from config and init the base class."""
        self.db = kwargs.get('db')
        self.target_driver = \
            self.target_mapping[self.configuration.safe_get('iscsi_helper')]
        super(SRBISCSIDriver, self).__init__(*args, **kwargs)
        self.backend_name =\
            self.configuration.safe_get('volume_backend_name') or 'SRB_iSCSI'
        self.protocol = 'iSCSI'
    def set_execute(self, execute):
        """Propagate the execute callable to the target driver as well."""
        super(SRBISCSIDriver, self).set_execute(execute)
        if self.target_driver is not None:
            self.target_driver.set_execute(execute)
    def ensure_export(self, context, volume):
        """Re-create the iSCSI export after e.g. a service restart."""
        device_path = self._mapper_path(volume)
        model_update = self.target_driver.ensure_export(context,
                                                        volume,
                                                        device_path)
        if model_update:
            self.db.volume_update(context, volume['id'], model_update)
    def create_export(self, context, volume, connector):
        """Creates an export for a logical volume."""
        self._attach_file(volume)
        vg = self._get_lvm_vg(volume)
        vg.activate_vg()
        # SRB uses the same name as the volume for the VG
        volume_path = self._mapper_path(volume)
        data = self.target_driver.create_export(context,
                                                volume,
                                                volume_path)
        return {
            'provider_location': data['location'],
            'provider_auth': data['auth'],
        }
    def remove_export(self, context, volume):
        """Tear down the iSCSI export and detach the backing device."""
        # NOTE(joachim) Taken from iscsi._ExportMixin.remove_export
        # This allows us to avoid "detaching" a device not attached by
        # an export, and avoid screwing up the device attach refcount.
        try:
            # Raises exception.NotFound if export not provisioned
            iscsi_target = self.target_driver._get_iscsi_target(context,
                                                                volume['id'])
            # Raises an Exception if currently not exported
            location = volume['provider_location'].split(' ')
            iqn = location[1]
            self.target_driver.show_target(iscsi_target, iqn=iqn)
            self.target_driver.remove_export(context, volume)
            self._detach_file(volume)
        except exception.NotFound:
            LOG.warning(_LW('Volume %r not found while trying to remove.'),
                        volume['id'])
        except Exception as exc:
            LOG.warning(_LW('Error while removing export: %r'), exc)
| apache-2.0 |
ds0nt/or-tools | examples/python/set_covering4.py | 15 | 4793 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set partition and set covering in Google CP Solver.
Example from the Swedish book
Lundgren, Roennqvist, Vaebrand
'Optimeringslaera' (translation: 'Optimization theory'),
page 408.
* Set partition:
We want to minimize the cost of the alternatives which covers all the
objects, i.e. all objects must be choosen. The requirement is than an
object may be selected _exactly_ once.
Note: This is 1-based representation
Alternative Cost Object
1 19 1,6
2 16 2,6,8
3 18 1,4,7
4 13 2,3,5
5 15 2,5
6 19 2,3
7 15 2,3,4
8 17 4,5,8
9 16 3,6,8
10 15 1,6,7
The problem has a unique solution of z = 49 where alternatives
3, 5, and 9
is selected.
* Set covering:
If we, however, allow that an object is selected _more than one time_,
then the solution is z = 45 (i.e. less cost than the first problem),
and the alternatives
4, 8, and 10
is selected, where object 5 is selected twice (alt. 4 and 8).
It's an unique solution as well.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/set_covering4.mzn
* Comet : http://www.hakank.org/comet/set_covering4.co
* ECLiPSe : http://www.hakank.org/eclipse/set_covering4.ecl
* SICStus : http://www.hakank.org/sicstus/set_covering4.pl
* Gecode : http://www.hakank.org/gecode/set_covering4.cpp
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main(set_partition=1):
  """Solve the instance; set_partition=1 means exact cover, else >= 1 cover."""
  # Create the solver.
  solver = pywrapcp.Solver("Set partition and set covering")
  #
  # data
  #
  num_alternatives = 10
  num_objects = 8
  # costs for the alternatives
  costs = [19, 16, 18, 13, 15, 19, 15, 17, 16, 15]
  # the alternatives, and their objects
  a = [
      # 1 2 3 4 5 6 7 8 the objects
      [1, 0, 0, 0, 0, 1, 0, 0], # alternative 1
      [0, 1, 0, 0, 0, 1, 0, 1], # alternative 2
      [1, 0, 0, 1, 0, 0, 1, 0], # alternative 3
      [0, 1, 1, 0, 1, 0, 0, 0], # alternative 4
      [0, 1, 0, 0, 1, 0, 0, 0], # alternative 5
      [0, 1, 1, 0, 0, 0, 0, 0], # alternative 6
      [0, 1, 1, 1, 0, 0, 0, 0], # alternative 7
      [0, 0, 0, 1, 1, 0, 0, 1], # alternative 8
      [0, 0, 1, 0, 0, 1, 0, 1], # alternative 9
      [1, 0, 0, 0, 0, 1, 1, 0]  # alternative 10
  ]
  #
  # declare variables
  #
  # x[i] == 1 iff alternative i is selected.
  x = [solver.IntVar(0, 1, "x[%i]" % i) for i in range(num_alternatives)]
  #
  # constraints
  #
  # sum the cost of the choosen alternative,
  # to be minimized
  z = solver.ScalProd(x, costs)
  #
  # Each object j must be covered exactly once (partition) or at least
  # once (covering), depending on the set_partition flag.
  for j in range(num_objects):
    if set_partition == 1:
      solver.Add(
          solver.SumEquality([x[i] * a[i][j]
                              for i in range(num_alternatives)],
                             1))
    else:
      solver.Add(
          solver.SumGreaterOrEqual([x[i] * a[i][j]
                                    for i in range(num_alternatives)],
                                   1))
  objective = solver.Minimize(z, 1)
  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add(x)
  solution.AddObjective(z)
  collector = solver.LastSolutionCollector(solution)
  solver.Solve(solver.Phase([x[i] for i in range(num_alternatives)],
                            solver.INT_VAR_DEFAULT,
                            solver.INT_VALUE_DEFAULT),
               [collector, objective])
  print "z:", collector.ObjectiveValue(0)
  print "selected alternatives:", [i + 1 for i in range(num_alternatives)
                                   if collector.Value(0, x[i]) == 1]
  print "failures:", solver.Failures()
  print "branches:", solver.Branches()
  print "WallTime:", solver.WallTime()
if __name__ == "__main__":
  # Run the exact-cover variant first, then relax to plain set covering.
  print "Set partition:"
  main(1)
  print "\nSet covering:"
  main(0)
| apache-2.0 |
kimvais/cryptography | src/cryptography/hazmat/primitives/serialization.py | 11 | 5152 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import base64
import struct
from enum import Enum
import six
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
def load_pem_private_key(data, password, backend):
    """Deserialize a PEM-encoded private key.

    Parsing and decryption with *password* (``None`` for unencrypted keys)
    are delegated entirely to *backend*.
    """
    return backend.load_pem_private_key(data, password)
def load_pem_public_key(data, backend):
    """Deserialize a PEM-encoded public key via *backend*."""
    return backend.load_pem_public_key(data)
def load_der_private_key(data, password, backend):
    """Deserialize a DER-encoded private key.

    Parsing and decryption with *password* (``None`` for unencrypted keys)
    are delegated entirely to *backend*.
    """
    return backend.load_der_private_key(data, password)
def load_der_public_key(data, backend):
    """Deserialize a DER-encoded public key via *backend*."""
    return backend.load_der_public_key(data)
def load_ssh_public_key(data, backend):
    """Load a public key from a one-line OpenSSH serialization.

    *data* must look like ``b'<key-type> <base64-body> [comment]'``.  The
    base64 body is decoded, cross-checked against the outer key type, and
    then handed to the type-specific loader together with *backend*.
    """
    key_parts = data.split(b' ', 2)
    if len(key_parts) < 2:
        raise ValueError(
            'Key is not in the proper format or contains extra data.')
    key_type = key_parts[0]
    key_body = key_parts[1]
    ecdsa_key_types = (
        b'ecdsa-sha2-nistp256',
        b'ecdsa-sha2-nistp384',
        b'ecdsa-sha2-nistp521',
    )
    # Pick the loader before decoding so unsupported types fail fast.
    if key_type == b'ssh-rsa':
        loader = _load_ssh_rsa_public_key
    elif key_type == b'ssh-dss':
        loader = _load_ssh_dss_public_key
    elif key_type in ecdsa_key_types:
        loader = _load_ssh_ecdsa_public_key
    else:
        raise UnsupportedAlgorithm('Key type is not supported.')
    try:
        decoded_data = base64.b64decode(key_body)
    except TypeError:
        raise ValueError('Key is not in the proper format.')
    # The decoded body repeats the key type; both copies must agree.
    inner_key_type, rest = _read_next_string(decoded_data)
    if inner_key_type != key_type:
        raise ValueError(
            'Key header and key body contain different key type values.'
        )
    return loader(key_type, rest, backend)
def _load_ssh_rsa_public_key(key_type, decoded_data, backend):
    """Build an RSA public key from the body of an ssh-rsa blob.

    The body is two mpints: the public exponent *e* then the modulus *n*.
    """
    e, remainder = _read_next_mpint(decoded_data)
    n, remainder = _read_next_mpint(remainder)
    if remainder:
        raise ValueError('Key body contains extra bytes.')
    numbers = rsa.RSAPublicNumbers(e, n)
    return numbers.public_key(backend)
def _load_ssh_dss_public_key(key_type, decoded_data, backend):
    """Build a DSA public key from the body of an ssh-dss blob.

    The body is four mpints in order: p, q, g, then the public value y.
    """
    remainder = decoded_data
    p, remainder = _read_next_mpint(remainder)
    q, remainder = _read_next_mpint(remainder)
    g, remainder = _read_next_mpint(remainder)
    y, remainder = _read_next_mpint(remainder)
    if remainder:
        raise ValueError('Key body contains extra bytes.')
    parameter_numbers = dsa.DSAParameterNumbers(p, q, g)
    public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers)
    return public_numbers.public_key(backend)
def _load_ssh_ecdsa_public_key(expected_key_type, decoded_data, backend):
    """Build an EC public key from the body of an ecdsa-sha2-nistp* blob.

    The body is the curve name string followed by the SEC1 point encoding.
    """
    curve_name, rest = _read_next_string(decoded_data)
    data, rest = _read_next_string(rest)
    if expected_key_type != b"ecdsa-sha2-" + curve_name:
        raise ValueError(
            'Key header and key body contain different key type values.'
        )
    if rest:
        raise ValueError('Key body contains extra bytes.')
    curve = {
        b"nistp256": ec.SECP256R1,
        b"nistp384": ec.SECP384R1,
        b"nistp521": ec.SECP521R1,
    }[curve_name]()
    # 0x04 is the SEC1 tag for an uncompressed point.
    if six.indexbytes(data, 0) != 4:
        raise NotImplementedError(
            "Compressed elliptic curve points are not supported"
        )
    # key_size is in bits, and sometimes it's not evenly divisible by 8, so we
    # add 7 to round up the number of bytes.
    if len(data) != 1 + 2 * ((curve.key_size + 7) // 8):
        raise ValueError("Malformed key bytes")
    # The point is tag byte + big-endian x + big-endian y, each coordinate
    # occupying exactly ceil(key_size / 8) bytes.
    x = utils.int_from_bytes(
        data[1:1 + (curve.key_size + 7) // 8], byteorder='big'
    )
    y = utils.int_from_bytes(
        data[1 + (curve.key_size + 7) // 8:], byteorder='big'
    )
    return ec.EllipticCurvePublicNumbers(x, y, curve).public_key(backend)
def _read_next_string(data):
"""
Retrieves the next RFC 4251 string value from the data.
While the RFC calls these strings, in Python they are bytes objects.
"""
str_len, = struct.unpack('>I', data[:4])
return data[4:4 + str_len], data[4 + str_len:]
def _read_next_mpint(data):
    """Read one mpint off the front of *data*.

    Returns ``(value, remainder)``.  Currently, all mpints are interpreted
    as unsigned.
    """
    raw, remainder = _read_next_string(data)
    value = utils.int_from_bytes(raw, byteorder='big', signed=False)
    return value, remainder
class Encoding(Enum):
    """Supported serialization encodings for keys."""
    PEM = "PEM"
    DER = "DER"
class PrivateFormat(Enum):
    """Supported structural formats for serialized private keys."""
    PKCS8 = "PKCS8"
    TraditionalOpenSSL = "TraditionalOpenSSL"
class PublicFormat(Enum):
    """Supported structural formats for serialized public keys."""
    SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1"
    PKCS1 = "Raw PKCS#1"
@six.add_metaclass(abc.ABCMeta)
class KeySerializationEncryption(object):
    """Abstract marker interface for private-key encryption options."""
    pass
@utils.register_interface(KeySerializationEncryption)
class BestAvailableEncryption(object):
    """Encrypt the serialized key with the best scheme the backend offers."""
    def __init__(self, password):
        # Reject non-bytes and empty passwords up front.
        if not isinstance(password, bytes) or len(password) == 0:
            raise ValueError("Password must be 1 or more bytes.")
        self.password = password
@utils.register_interface(KeySerializationEncryption)
class NoEncryption(object):
    """Serialize the private key without any encryption."""
    pass
| bsd-3-clause |
labordoc/labordoc-next | modules/bibformat/lib/bibformat_templates.py | 1 | 86532 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""HTML Templates for BibFormat administration"""
__revision__ = "$Id$"
# non Invenio imports
import cgi
# Invenio imports
from invenio.messages import gettext_set_language
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL
from invenio.messages import language_list_long
from invenio.config import CFG_PATH_PHP
# Upper bound on the number of mappings rendered on one admin page.
MAX_MAPPINGS = 100 #show max this number of mappings on one page
class Template:
"""Templating class, refer to bibformat.py for examples of call"""
def tmpl_admin_index(self, ln, warnings, is_admin):
"""
Returns the main BibFormat admin page.
@param ln: language
@param warnings: a list of warnings to display at top of page. None if no warning
@param is_admin: indicate if user is authorized to use BibFormat
@return: main BibFormat admin page
"""
_ = gettext_set_language(ln) # load the right message language
out = ''
if warnings:
out += '''
<table width="66%%" class="errorbox" style="margin-left: auto; margin-right: auto;">
<tr>
<th class="errorboxheader">
%(warnings)s
</th>
</tr>
</table>
''' % {'warnings': '<br/>'.join(warnings)}
out += '''
<p>
This is where you can edit the formatting styles available for the records. '''
if not is_admin:
out += '''You need to
<a href="%(siteurl)s/youraccount/login?referer=%(siteurl)s/admin/bibformat/bibformatadmin.py">login</a> to enter.
''' % {'siteurl':CFG_SITE_URL}
out += '''
</p>
<dl>
<dt><a href="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%(ln)s">Manage Format Templates</a></dt>
<dd>Define how to format a record.</dd>
</dl>
<dl>
<dt><a href="%(siteurl)s/admin/bibformat/bibformatadmin.py/output_formats_manage?ln=%(ln)s">Manage Output Formats</a></dt>
<dd>Define which template is applied to which record for a given output.</dd>
</dl>
<br/>
<dl>
<dt><a href="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%(ln)s">Format Elements Documentation</a></dt>
<dd>Documentation of the format elements to be used inside format templates.</dd>
</dl>
<dl>
<dt><a href="%(siteurl)s/help/admin/bibformat-admin-guide">BibFormat Admin Guide</a></dt>
<dd>Documentation about BibFormat administration</dd>
</dl>
'''% {'siteurl':CFG_SITE_URL, 'ln':ln}
if CFG_PATH_PHP:
#Show PHP admin only if PHP is enabled
out += '''
<br/><br/><br/><br/>
<div style="background-color:rgb(204, 204, 204);">
<h2><span style="color:rgb(204, 0, 0);">Old</span>
BibFormat admin interface (in gray box)</h2>
<em>
<p>The BibFormat admin interface enables you to specify how the
bibliographic data is presented to the end user in the search
interface and search results pages. For example, you may specify that
titles should be printed in bold font, the abstract in small italic,
etc. Moreover, the BibFormat is not only a simple bibliographic data
<em>output formatter</em>, but also an automated <em>link
constructor</em>. For example, from the information on journal name
and pages, it may automatically create links to publisher's site based
on some configuration rules.
<h2>Configuring BibFormat</h2>
<p>By default, a simple HTML format based on the most common fields
(title, author, abstract, keywords, fulltext link, etc) is defined.
You certainly want to define your own ouput formats in case you have a
specific metadata structure.
<p>Here is a short guide of what you can configure:
<blockquote>
<dl>
<dt><a href="BEH_display.php">Behaviours</a>
<dd>Define one or more output BibFormat behaviours. These are then
passed as parameters to the BibFormat modules while executing
formatting.
<br /><em>Example:</em> You can tell BibFormat that is has to enrich the
incoming metadata file by the created format, or that it only has to
print the format out.
<dt><a href="OAIER_display.php">Extraction Rules</a>
<dd>Define how the metadata tags from input are mapped into internal
BibFormat variable names. The variable names can afterwards be used
in formatting and linking rules.
<br /><em>Example:</em> You can tell that <code>100 $a</code> field
should be mapped into <code>$100.a</code> internal variable that you
could use later.
<dt><a href="LINK_display.php">Link Rules</a>
<dd>Define rules for automated creation of URI links from mapped
internal variables.
<br /><em>Example:</em> You can tell a rule how to create a link to
People database out of the <code>$100.a</code> internal variable
repesenting author's name. (The <code>$100.a</code> variable was mapped
in the previous step, see the Extraction Rules.)
<dt><a href="LINK_FORMAT_display.php">File Formats</a>
<dd>Define file format types based on file extensions. This will be
used when proposing various fulltext services.
<br /><em>Example:</em> You can tell that <code>*.pdf</code> files will
be treated as PDF files.
<dt><a href="UDF_display.php">User Defined Functions (UDFs)</a>
<dd>Define your own functions that you can reuse when creating your
own output formats. This enables you to do complex formatting without
ever touching the BibFormat core code.
<br /><em>Example:</em> You can define a function how to match and
extract email addresses out of a text file.
<dt><a href="FORMAT_display.php">Formats</a>
<dd>Define the output formats, i.e. how to create the output out of
internal BibFormat variables that were extracted in a previous step.
This is the functionality you would want to configure most of the
time. It may reuse formats, user defined functions, knowledge bases,
etc.
<br /><em>Example:</em> You can tell that authors should be printed in
italic, that if there are more than 10 authors only the first three
should be printed, etc.
<dt><a href="KB_display.php">Knowledge Bases (KBs)</a>
<dd>Define one or more knowledge bases that enables you to transform
various forms of input data values into the unique standard form on
the output.
<br /><em>Example:</em> You can tell that <em>Phys Rev D</em> and
<em>Physical Review D</em> are both the same journal and that these
names should be standardized to <em>Phys Rev : D</em>.
<dt><a href="test.php">Execution Test</a>
<dd>Enables you to test your formats on your sample data file. Useful
when debugging newly created formats.
</dl>
</blockquote>
<p>To learn more on BibFormat configuration, you can consult the <a
href="guide.html">BibFormat Admin Guide</a>.</small>
<h2>Running BibFormat</h2>
<h3>From the Web interface</h3>
<p>
Run <a href="BIBREFORMAT_display.php">Reformat Records</a> tool.
This tool permits you to update stored formats for bibliographic records.
<br />
It should normally be used after configuring BibFormat's
<a href="BEH_display.php">Behaviours</a> and
<a href="FORMAT_display.php">Formats</a>.
When these are ready, you can choose to rebuild formats for selected
collections or you can manually enter a search query and the web interface
will accomplish all necessary formatting steps.
<br />
<i>Example:</i> You can request Photo collections to have their HTML
brief formats rebuilt, or you can reformat all the records written by Ellis.
<h3>From the command-line interface</h3>
<p>Consider having an XML MARC data file that is to be uploaded into
the Invenio. (For example, it might have been harvested from other
sources and processed via <a href="../bibconvert/">BibConvert</a>.)
Having configured BibFormat and its default output type behaviour, you
would then run this file throught BibFormat as follows:
<blockquote>
<pre>
$ bibformat < /tmp/sample.xml > /tmp/sample_with_fmt.xml
<pre>
</blockquote>
that would create default HTML formats and would "enrich" the input
XML data file by this format. (You would then continue the upload
procedure by calling successively <a
href="../bibupload/">BibUpload</a> and <a
href="../bibindex/">BibIndex</a>.)
<p>Now consider a different situation. You would like to add a new
possible format, say "HTML portfolio" and "HTML captions" in order to
nicely format multiple photographs in one page. Let us suppose that
these two formats are called <code>hp</code> and <code>hc</code> and
are already loaded in the <code>collection_format</code> table.
(TODO: describe how this is done via WebAdmin.) You would then
proceed as follows: firstly, you would prepare the corresponding <a
href="BEH_display.php">output behaviours</a> called <code>HP</code>
and <code>HC</code> (TODO: note the uppercase!) that would not enrich
the input file but that would produce an XML file with only
<code>001</code> and <code>FMT</code> tags. (This is in order not to
update the bibliographic information but the formats only.) You would
also prepare corresponding <a href="FORMAT_display.php">formats</a>
at the same time. Secondly, you would launch the formatting as
follows:
<blockquote>
<pre>
$ bibformat otype=HP,HC < /tmp/sample.xml > /tmp/sample_fmts_only.xml
<pre>
</blockquote>
that should give you an XML file containing only 001 and FMT tags.
Finally, you would upload the formats:
<blockquote>
<pre>
$ bibupload < /tmp/sample_fmts_only.xml
<pre>
</blockquote>
and that's it. The new formats should now appear in <a
href="%(siteurl)s">WebSearch</a>.
</em>
</div>
''' % {'siteurl':CFG_SITE_URL, 'ln':ln}
return out
def tmpl_admin_format_template_show_attributes(self, ln, name, description, filename, editable,
                                               all_templates=None, new=False):
    """
    Returns a page to change format template name and description.

    If template is new, offer a way to create a duplicate from an
    existing template.
    @param ln: language
    @param name: the name of the format
    @param description: the description of the format
    @param filename: the filename of the template
    @param editable: True if we let user edit, else False
    @param all_templates: a list of tuples (filename, name) of all other templates
    @param new: if True, the format template has just been added (is new)
    @return: editor for 'format'
    """
    # FIX: 'all_templates' previously defaulted to a mutable list ([]), which
    # Python shares across all calls of the function.  Use the None sentinel
    # and build a fresh empty list per call instead.
    if all_templates is None:
        all_templates = []
    _ = gettext_set_language(ln)    # load the right message language
    out = ""
    # Navigation menu linking the four template-editor sub-pages; entry 2
    # (this page) is rendered as plain text, the others as links.
    out += '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(close_editor)s</a></small> </td>
<td>1. <small><a href="format_template_show?ln=%(ln)s&bft=%(filename)s">%(template_editor)s</a></small> </td>
<td>2. <small>%(modify_template_attributes)s</small> </td>
<td>3. <small><a href="format_template_show_dependencies?ln=%(ln)s&bft=%(filename)s">%(check_dependencies)s</a></small> </td>
</tr>
</table><br/>
''' % {'ln':ln,
       'menu':_("Menu"),
       'filename':filename,
       'close_editor': _("Close Editor"),
       'modify_template_attributes': _("Modify Template Attributes"),
       'template_editor': _("Template Editor"),
       'check_dependencies': _("Check Dependencies")
       }
    # Non-editable templates get their widgets disabled/read-only.
    disabled = ""
    readonly = ""
    if not editable:
        disabled = 'disabled="disabled"'
        readonly = 'readonly="readonly"'
    out += '''
<form action="format_template_update_attributes?ln=%(ln)s&bft=%(filename)s" method="POST">
''' % {'ln':ln,
       'filename':filename}
    if new:
        #Offer the possibility to make a duplicate of existing format template code
        out += '''
<table><tr>
<th class="adminheaderleft">Make a copy of format template: [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#addFormatTemplate">?</a>]</th>
</tr>
<tr>
<td><select tabindex="1" name="duplicate" id="duplicate" %(readonly)s>
<option value="">None (Blank Page)</option>
<option value="" disabled="disabled">-------------</option>
''' % {'siteurl': CFG_SITE_URL,
       'readonly':readonly}
        # One <option> per existing template to copy from.
        for (o_filename, o_name) in all_templates:
            out += '''<option value="%(template_filename)s">%(template_name)s</option>''' % {'template_name':o_name,
                                                                                             'template_filename': o_filename}
        out += ''' </select>
</td></tr></table>'''
    # Name attribute (a hidden "key" field carries the original name so the
    # server can detect renames).
    out += '''
<table><tr>
<th colspan="2" class="adminheaderleft">%(name)s attributes [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#attrsFormatTemplate">?</a>]</th>
</tr>
<tr>
<td class="admintdright">
<input type="hidden" name="key" value="%(name)s"/>
<label for="name">%(name_label)s</label>: </td>
<td><input tabindex="2" name="name" type="text" id="name" size="25" value="%(name)s" %(readonly)s/>
<input type="hidden" value="%(filename)s"/>
</td>
</tr>
''' % {"name": name,
       'ln':ln,
       'filename':filename,
       'disabled':disabled,
       'readonly':readonly,
       'name_label': _("Name"),
       'siteurl':CFG_SITE_URL
       }
    # Description attribute and the submit button.
    out += '''
<tr>
<td class="admintdright" valign="top"><label for="description">%(description_label)s</label>: </td>
<td><textarea tabindex="3" name="description" id="description" rows="4" cols="25" %(readonly)s>%(description)s</textarea> </td>
</tr>
<tr>
<td> </td>
<td align="right"><input tabindex="6" class="adminbutton" type="submit" value="%(update_format_attributes)s" %(disabled)s/></td>
</tr>
</table></form>
''' % {"description": description,
       'ln':ln,
       'filename':filename,
       'disabled':disabled,
       'readonly':readonly,
       'description_label': _("Description"),
       'update_format_attributes': _("Update Format Attributes"),
       'siteurl':CFG_SITE_URL
       }
    return out
def tmpl_admin_format_template_show_dependencies(self, ln, name, filename, output_formats, format_elements, tags):
    """
    Shows the dependencies (on elements) of the given format.

    @param ln: language
    @param name: the name of the template
    @param filename: the filename of the template
    @param format_elements: the elements (and list of tags in each element) this template depends on
    @param output_formats: the output format that depend on this template
    @param tags: the tags that are called by format elements this template depends on.
    @return: HTML markup
    """
    _ = gettext_set_language(ln)  # load the right message language
    # Page header: editor menu plus the opening of the three-column
    # dependency table (first <td> is started here).
    pieces = ['''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(close_editor)s</a> </small></td>
<td>1. <small><a href="format_template_show?ln=%(ln)s&bft=%(filename)s">%(template_editor)s</a></small> </td>
<td>2. <small><a href="format_template_show_attributes?ln=%(ln)s&bft=%(filename)s">%(modify_template_attributes)s</a></small> </td>
<td>3. <small>%(check_dependencies)s</small> </td>
</tr>
</table>
<table width="90%%" class="admin_wvar" cellspacing="0"><tr>
<th class="adminheaderleft">Output Formats that use %(name)s</th>
<th class="adminheaderleft">Format Elements used by %(name)s*</th>
<th class="adminheaderleft">All Tags Called*</th>
</tr>
<tr>
<td valign="top"> <br/>
''' % {'menu': _("Menu"),
       'ln': ln,
       'filename': filename,
       'name': name,
       'close_editor': _("Close Editor"),
       'template_editor': _("Template Editor"),
       'modify_template_attributes': _("Modify Template Attributes"),
       'check_dependencies': _("Check Dependencies")}]
    # Column 1: every output format that references this template,
    # each linking to its own editor page (with its tags in parentheses).
    if not output_formats:
        pieces.append('<p align="center"><i>No output format uses this format template.</i></p>')
    for dependent in output_formats:
        pieces.append(''' <a href="output_format_show?ln=%(ln)s&bfo=%(filename)s">%(name)s</a>''' % {
            'filename': dependent['filename'],
            'name': dependent['names']['generic'],
            'ln': ln})
        if dependent['tags']:
            pieces.append("(" + ", ".join(dependent['tags']) + ")")
        pieces.append("<br/>")
    # Column 2: the format elements this template calls, each linking to its
    # documentation anchor (with the tags each element reads in parentheses).
    pieces.append('</td><td valign="top"> <br/>')
    if not format_elements:
        pieces.append('<p align="center"><i>This format template uses no format element.</i></p>')
    for element in format_elements:
        element_name = element['name']
        pieces.append(''' <a href="format_elements_doc?ln=%(ln)s#%(anchor)s">%(name)s</a>''' % {
            'name': "bfe_" + element_name.lower(),
            'anchor': element_name.upper(),
            'ln': ln})
        if element['tags']:
            pieces.append("(" + ", ".join(element['tags']) + ")")
        pieces.append("<br/>")
    # Column 3: the raw tags reached through those elements.
    pieces.append('</td><td valign="top"> <br/>')
    if not tags:
        pieces.append('<p align="center"><i>This format template uses no tag.</i></p>')
    for tag in tags:
        pieces.append('''%(tag)s<br/>''' % {'tag': tag})
    # Close the table and warn about possibly-incomplete tag detection.
    pieces.append('''
</td>
</tr>
</table>
<b>*Note</b>: Some tags linked with this format template might not be shown. Check manually.
''')
    return ''.join(pieces)
def tmpl_admin_format_template_show(self, ln, name, description, code, filename, ln_for_preview, pattern_for_preview, editable, content_type_for_preview, content_types):
    """
    Returns the editor for format templates. Edit format with given X{name}
    @param ln: language
    @param name: the format to edit
    @param description: the description of the format template
    @param code: the code of the template of the editor
    @param filename: the filename of the template
    @param ln_for_preview: the language for the preview (for bfo)
    @param pattern_for_preview: the search pattern to be used for the preview (for bfo)
    @param editable: True if we let user edit, else False
    @param content_type_for_preview: content-type to use for preview
    @param content_types: list of available content-types
    @return: editor for 'format'
    """
    _ = gettext_set_language(ln)    # load the right message language
    out = ""
    # If xsl, hide some options in the menu
    # (.xsl templates get only entries 0 and 1 below; the attribute and
    # dependency pages are skipped, see the endswith('.xsl') check further on.)
    nb_menu_options = 4
    if filename.endswith('.xsl'):
        nb_menu_options = 2
    # Page head: inline CSS, the quicktags toolbar script, and inline JS for
    # (a) an exit-confirmation guard toggled by the textarea/submit handlers,
    # (b) a cross-browser element lookup (getByID), and (c) dynamic resizing
    # of the code/preview/documentation panes; then the first menu cells.
    out += '''
<style type="text/css">
<!--
.ed_button {
font-size: x-small;
}
-->
</style>
<script src="%(siteurl)s/static/bibformat-admin-interface/js_quicktags.js" type="text/javascript"></script>
<script type="text/javascript">
/* Ask user confirmation before leaving page */
var user_must_confirm_before_leaving_page = false;
window.onbeforeunload = confirmExit;
function confirmExit() {
if (user_must_confirm_before_leaving_page)
return "%(leave_editor_message)s";
}
function getByID( id ) {
if (document.getElementById)
var returnVar = document.getElementById(id);
else if (document.all)
var returnVar = document.all[id];
else if (document.layers)
var returnVar = document.layers[id];
return returnVar;
}
window.onresize= resizeViews;
window.onload= prepareLayout;
function prepareLayout(){
resizeViews();
}
function resizeViews(){
var myWidth = 0, myHeight = 0;
if( typeof( window.innerWidth ) == 'number' ) {
//Non-IE
myWidth = window.innerWidth;
myHeight = window.innerHeight;
} else if( document.documentElement && ( document.documentElement.clientWidth || document.documentElement.clientHeight ) ) {
//IE 6+ in 'standards compliant mode'
myWidth = document.documentElement.clientWidth;
myHeight = document.documentElement.clientHeight;
} else if( document.body && ( document.body.clientWidth || document.body.clientHeight ) ) {
//IE 4 compatible
myWidth = document.body.clientWidth;
myHeight = document.body.clientHeight;
}
if (myHeight <= 400) {
getByID("code").style.height=10;
getByID("previewiframe").style.height=10;
} else{
getByID("code").style.height=((myHeight-400)/2);
getByID("previewiframe").style.height=((myHeight-400)/2);
}
getByID("previewiframe").style.height=200;
// Resize documentation
var height = document.documentElement.clientHeight;
height -= getByID('shortDocFrame').offsetTop
//height -= 20;
getByID('shortDocFrame').style.height = height +"px";
}
</script>
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="%(nb_menu_options)s" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(close_editor)s</a></small> </td>
<td>1. <small>%(template_editor)s</small> </td>
''' % {'ln': ln, 'filename': filename,
       'menu': _("Menu"),
       'label_show_doc': _("Show Documentation"),
       'label_hide_doc': _("Hide Documentation"),
       'close_editor': _("Close Editor"),
       'modify_template_attributes': _("Modify Template Attributes"),
       'template_editor': _("Template Editor"),
       'check_dependencies': _("Check Dependencies"),
       'nb_menu_options': nb_menu_options,
       'siteurl': CFG_SITE_SECURE_URL or CFG_SITE_URL,
       'leave_editor_message': _('Your modifications will not be saved.').replace('"', '\\"')
       }
    # Menu entries 2 and 3 only exist for non-XSL templates (see
    # nb_menu_options above).
    if not filename.endswith('.xsl'):
        out +='''<td>2. <small><a href="format_template_show_attributes?ln=%(ln)s&bft=%(filename)s">%(modify_template_attributes)s</a></small> </td>
<td>3. <small><a href="format_template_show_dependencies?ln=%(ln)s&bft=%(filename)s">%(check_dependencies)s</a></small> </td>
''' % {'ln': ln, 'filename': filename,
       'menu': _("Menu"),
       'label_show_doc': _("Show Documentation"),
       'label_hide_doc': _("Hide Documentation"),
       'close_editor': _("Close Editor"),
       'modify_template_attributes': _("Modify Template Attributes"),
       'template_editor': _("Template Editor"),
       'check_dependencies': _("Check Dependencies"),
       'siteurl': CFG_SITE_SECURE_URL or CFG_SITE_URL
       }
    # Close the menu table and emit the show/hide handler for the
    # documentation column (toggled by the "docLink" anchor below).
    out +='''
</tr>
</table>
<script type="text/javascript">
function toggle_doc_visibility(){
var doc = document.getElementById('docTable');
var link = document.getElementById('docLink');
if (doc.style.display=='none'){
doc.style.display = '';
link.innerHTML = "%(label_hide_doc)s"
} else {
doc.style.display = 'none';
link.innerHTML = "%(label_show_doc)s"
}
}
</script>
''' % {'ln': ln, 'filename': filename,
       'menu': _("Menu"),
       'label_show_doc': _("Show Documentation"),
       'label_hide_doc': _("Hide Documentation"),
       'close_editor': _("Close Editor"),
       'modify_template_attributes': _("Modify Template Attributes"),
       'template_editor': _("Template Editor"),
       'check_dependencies': _("Check Dependencies"),
       'siteurl': CFG_SITE_SECURE_URL or CFG_SITE_URL
       }
    # Read-only templates: disable the save button, make the textarea
    # read-only, and drop the element-insertion toolbar entirely.
    disabled = ""
    readonly = ""
    toolbar = """<script type="text/javascript">edToolbar('%s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%s');</script>""" % (CFG_SITE_URL, ln)
    if not editable:
        disabled = 'disabled="disabled"'
        readonly = 'readonly="readonly"'
        toolbar = ''
    #First column: template code and preview
    # (the form posts into the hidden "previewiframe" so save/preview happens
    # without leaving the editor page)
    out += '''
<table width="90%%" cellspacing="5">
<tr>
<td valign="top">
<form action="format_template_show_preview_or_save?ln=%(ln)s&bft=%(filename)s" method="POST" target="previewiframe">
<table width="100%%" id="mainTable"><tr>
<th class="adminheaderleft"><div style="float:left;">Format template code</div>
<div style="float:right;">
<a id="docLink" href="#" onclick="toggle_doc_visibility()">%(label_hide_doc)s</a>
</div>
</th>
</tr>
<tr><td colspan="2" id="codetd">
%(toolbar)s
<textarea name="code" id="code" rows="25" %(readonly)s
style="width:100%%" onchange="user_must_confirm_before_leaving_page=true;">%(code)s</textarea>
<script type="text/javascript">var edCanvas = document.getElementById('code');</script>
</td></tr>
<tr><td align="right" valign="top">
<input type="submit" class="adminbutton" name="save_action" value="Save Changes" onclick="user_must_confirm_before_leaving_page=false;" %(disabled)s/>
</td>
</tr>
</table>
<table width="100%%">
<tr><th class="adminheaderleft">
Preview
</th>
</tr>
<tr><td align="right" valign="top" style="font-size: small;">
<nobr>
<label for="content_type_for_preview">Content-type (MIME):</label> <select id="content_type_for_preview" name="content_type_for_preview" style="font-size: x-small;">
''' % {'ln':ln,
       'siteurl':CFG_SITE_URL,
       'filename':filename,
       'label_hide_doc':_("Hide Documentation"),
       'code':code,
       'readonly':readonly,
       'disabled':disabled,
       'toolbar':toolbar}
    # Content-type selector, pre-selecting the current preview content-type.
    for content_type in content_types:
        if content_type == content_type_for_preview:
            out += '''<option value="%(content_type)s" selected="selected">%(content_type)s</option>''' % {'content_type':content_type}
        else:
            out += '''<option value="%(content_type)s">%(content_type)s</option>''' % {'content_type':content_type}
    out += '''
</select></nobr>
<nobr><label for="ln_for_preview">Language:</label> <select id="ln_for_preview" name="ln_for_preview" style="font-size: x-small;">
'''
    # Preview-language selector; lang is a (code, long name) pair.
    for lang in language_list_long():
        if lang[0] == ln_for_preview:
            out += '''<option value="%(ln)s" selected="selected">%(language)s</option>''' % {'ln':lang[0],
                                                                                             'language':lang[1]}
        else:
            out += '''<option value="%(ln)s">%(language)s</option>''' % {'ln':lang[0], 'language':lang[1]}
    # Search-pattern input, reload button, and the preview iframe itself.
    out += '''
</select></nobr>
<nobr><label for="pattern_for_preview">Search Pattern: </label><input type="text" value="%(pattern_for_preview)s" size="8" name="pattern_for_preview" id="pattern_for_preview" style="font-size: x-small;"/></nobr>
<input type="submit" class="adminbutton" name="preview_action" value="Reload Preview"/>
</td>
</tr>
<tr><td>
<iframe src ="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_template_show_preview_or_save?ln=%(ln)s&ln_for_preview=%(ln_for_preview)s&pattern_for_preview=%(pattern_for_preview)s&bft=%(filename)s" name="previewiframe" id="previewiframe" width="100%%" height="400"></iframe>
</td></tr>
</table>
</form>
</td>
''' % {'code':code, 'ln':ln,
       'siteurl':CFG_SITE_URL, 'filename':filename,
       'ln_for_preview':ln_for_preview,
       'pattern_for_preview':pattern_for_preview
       }
    #Second column Print documentation
    # (searchable element documentation rendered by
    # tmpl_admin_format_template_show_short_doc inside the shortDocFrame iframe)
    out += '''
<td valign="top" id="docTable">
<table width="100%%"><tr>
<th class="adminheaderleft">Elements Documentation</th>
</tr>
</table>
<table width="100%%"><tr>
<td class="admintdright">
<form action="format_template_show_short_doc?ln=%(ln)s" method="POST" target="shortDocFrame">
<nobr><label for="search_doc_pattern">Search for: </label><input type="text" size="15" name="search_doc_pattern" id="search_doc_pattern" value=""/> <input type="submit" class="adminbutton" name="search_in_doc" value="Search" /></nobr>
</form>
</td>
</tr>
</table>
<iframe name="shortDocFrame" id="shortDocFrame" src ="%(siteurl)s/admin/bibformat/bibformatadmin.py/format_template_show_short_doc?ln=%(ln)s" height="90%%" width="98%%"></iframe>
</td>
</tr>
</table>
''' % {'siteurl':CFG_SITE_URL, 'ln':ln}
    return out
def tmpl_admin_format_template_show_short_doc(self, ln, format_elements):
    """
    Prints the format element documentation in a condensed way to display
    inside format template editor.
    This page is different from others: it is displayed inside a <iframe>
    tag in template tmpl_admin_format_template_show.
    @param ln: language
    @param format_elements: a list of format elements structures as returned by get_format_elements
    @return: HTML markup
    """
    # Standalone mini-page (its own <html> skeleton) because it is rendered
    # inside the editor's iframe, not within the admin page layout.  The
    # inline JS provides: hover highlighting that reveals each element's
    # parameter list, and a click handler that inserts the element's tag into
    # the parent page's "code" textarea (via parent.document).
    out = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>BibFormat Short Documentation of Format Elements</title>
<link rel="stylesheet" href="%(siteurl)s/css/invenio.css">
<script src="%(siteurl)s/static/bibformat-admin-interface/js_quicktags.js" type="text/javascript"></script>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body>
<script type="text/javascript">
function toggle_visibility(element, show, r,g,b){
var children = element.childNodes
var child
for(x=0; x<children.length; x++){
if (children[x].id == 'params'){
child = children[x]
}
}
if (show=='show'){
element.style.background='rgb(201, 218, 255)'
element.style.cursor='pointer'
child.style.display=''
} else {
element.style.background="rgb("+r+","+g+","+b+")"
child.style.display='none'
}
}
///// FROM JS QuickTags ///////
// Copyright (c) 2002-2005 Alex King
// http://www.alexking.org/
//
// Licensed under the LGPL license
// http://www.gnu.org/copyleft/lesser.html
function insertAtCursor(myField, myValue) {
//IE support
if (document.selection) {
myField.focus();
sel = document.selection.createRange();
sel.text = myValue;
}
//MOZILLA/NETSCAPE support
else if (myField.selectionStart || myField.selectionStart == '0') {
var startPos = myField.selectionStart;
var endPos = myField.selectionEnd;
myField.value = myField.value.substring(0, startPos)
+ myValue
+ myField.value.substring(endPos, myField.value.length);
} else {
myField.value += myValue;
}
}
///// END FROM JS QuickTags /////
function insert_my_code_into_container(code){
var codeArea = parent.document.getElementById("code");
if (codeArea.readOnly == false){
//var clean_code = code.replace(=#,'="');
//clean_code = clean_code.replace(# ,'" ');
insertAtCursor(codeArea, code);
}
}
</script>
''' % {'siteurl': CFG_SITE_SECURE_URL or CFG_SITE_URL}
    if len(format_elements) == 0:
        out += '''
<em>No format elements found</em>
'''
    else:
        line = 0
        #Print elements doc
        # 'line' only drives the zebra striping (alternating backgrounds and
        # <hr/> separators) of the element rows.
        for format_element in format_elements:
            format_attributes = format_element['attrs']
            row_content = ""
            name = format_attributes['name']
            description = format_attributes['description']
            # NOTE(review): '\u0022' is NOT an escape sequence in a Python 2
            # byte string — these six characters reach the browser literally,
            # where the JS engine decodes them as '"' inside the single-quoted
            # onclick argument (so the inserted snippet has quoted parameter
            # values without breaking the attribute).  Under Python 3 the
            # literal would become a real double quote and break the onclick
            # markup — TODO confirm before any porting.
            params = [x['name'] + '=\u0022'+str(x['default'])+'\u0022' for x in format_attributes['params']]
            builtin_params = [x['name'] + '=\u0022'+str(x['default'])+'\u0022' for x in format_attributes['builtin_params']]
            # Full element invocation snippet inserted on click.
            code = "<BFE_" + name + ' ' + ' '.join(builtin_params)+ ' ' + ' '.join(params) +"/>"
            if line % 2:
                row_content += '''<div onmouseover="toggle_visibility(this, 'show', 235, 247, 255);"
onmouseout="toggle_visibility(this, 'hide', 235, 247, 255);"
style="background-color: rgb(235, 247, 255);"
onclick="insert_my_code_into_container('%s')"
><hr/>''' % code
            else:
                row_content += '''<div onmouseover="toggle_visibility(this, 'show', 255, 255, 255);"
onmouseout="toggle_visibility(this, 'hide', 255, 255, 255);"
onclick="insert_my_code_into_container('%s')"
>''' % code
            # Bold list of parameter names (built but only referenced through
            # the 'params_names' key of the substitution dict below).
            params_names = ""
            for param in format_attributes['params']:
                params_names += "<b>"+param['name'] +'</b> '
            row_content += '''
<code> <b><BFE_%(name)s/></b><br/></code>
<small>%(description)s.</small>
<div id="params" style="display:none;">
<ul>
''' % {'params_names':params_names, 'name':name, 'description':description}
            # Hidden (until hover) per-parameter documentation: user params
            # first, then the built-in ones.
            for param in format_attributes['params']:
                row_content += '''
<li><small><b>%(name)s</b>: %(description)s</small></li>
''' % {'name':param['name'],
       'description':param['description']}
            for param in format_attributes['builtin_params']:
                row_content += '''
<li><small><b>%(name)s</b>: %(description)s</small></li>
''' % {'name':param['name'],
       'description':param['description']}
            row_content += '</ul></div>'
            # Close the row div (odd rows also close with a separator rule).
            if line % 2:
                row_content += '''<hr/></div>'''
            else:
                row_content += '</div>'
            line += 1
            out += row_content
    out += '''</body></html>'''
    return out
def tmpl_admin_format_templates_management(self, ln, formats):
    """
    Returns the management console for formats. Includes list of formats and
    associated administration tools.
    @param ln: language
    @param formats: a list of dictionaries with formats attributes
    @return: format management console as html
    """
    _ = gettext_set_language(ln)  # load the right message language

    def _or_space(value):
        # The admin table renders an otherwise-empty cell as a single space.
        return " " if value == "" else value

    #top of the page and table header
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small>%(manage_format_templates)s</small> </td>
<td>1. <small><a href="output_formats_manage?ln=%(ln)s">%(manage_output_formats)s</a> </td>
<td>2. <small><a href="format_elements_doc?ln=%(ln)s">%(format_elements_documentation)s</a></small> </td>
</tr>
</table>
<p>From here you can create, edit or delete formats templates.
Have a look at the <a href="format_elements_doc?ln=%(ln)s">format elements documentation</a> to
learn which elements you can use in your templates.</p>
<table class="admin_wvar" width="95%%" cellspacing="0">
<tr>
<th class="adminheaderleft" > </th>
<th class="adminheaderleft" >%(name)s</th>
<th class="adminheaderleft" >%(description)s</th>
<th class="adminheaderleft" >%(status)s</th>
<th class="adminheaderleft" >%(last_modification_date)s</th>
<th class="adminheadercenter" >%(action)s [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#formatTemplates">?</a>]</th>
</tr>
''' % {'name':_("Name"),
       'description':_("Description"),
       'menu': _("Menu"),
       'status':_("Status"),
       'last_modification_date':_("Last Modification Date"),
       'action':_("Action"),
       'ln':ln,
       'manage_output_formats':_("Manage Output Formats"),
       'manage_format_templates':_("Manage Format Templates"),
       'format_elements_documentation':_("Format Elements Documentation"),
       'siteurl':CFG_SITE_URL}
    #table content: formats names, description and buttons
    if not formats:
        out += '''<tr>
<td colspan="6" class="admintd" align="center"><em>No format</em></td>
</tr>'''
    else:
        # One zebra-striped row per template; row_index parity picks the
        # background.  Note: the 'style' value deliberately leaves its
        # attribute quote open — the row template closes it.
        for row_index, attrs in enumerate(formats):
            row_style = 'style="vertical-align: middle;'
            if row_index % 2:
                row_style = 'style="vertical-align: middle;background-color: rgb(235, 247, 255);'
            deletable = attrs['editable']
            out += '''<tr>
<td class="admintdright" %(style)s"> </td>
<td class="admintdleft" %(style)s white-space: nowrap;"><a href="format_template_show?bft=%(filename)s&ln=%(ln)s">%(name)s</a></td>
<td class="admintdleft" %(style)s" >%(description)s</td>
<td class="admintdleft" %(style)s white-space: nowrap;" >%(status)s</td>
<td class="admintdleft" %(style)s white-space: nowrap;" >%(last_mod_date)s</td>
<td class="admintd" %(style)s white-space: nowrap;">
<form method="post" action="format_template_delete?ln=%(ln)s&bft=%(filename)s">
<input class="adminbutton" type="submit" value="%(delete)s" %(disabled)s/>
</form>
</td>
</tr>
''' % {'filename': _or_space(attrs['filename']),
       'name': _or_space(attrs['name']),
       'description': _or_space(attrs['description']),
       'ln': ln,
       'style': row_style,
       'disabled': "" if deletable else 'disabled="disabled"',
       'last_mod_date': attrs['last_mod_date'],
       'status': attrs['status'],
       'delete': _("Delete")
       }
    #table footer, buttons and bottom of the page
    out += '''
<tr>
<td align="left" colspan="3">
<form action="format_templates_manage?ln=%(ln)s">
<input type="hidden" name="checking" value="1"></input>
<input class="adminbutton" type="submit" value="%(extensive_checking)s"/>
</form>
</td>
<td align="right" colspan="3">
<form action="format_template_add?ln=%(ln)s">
<input class="adminbutton" type="submit" value="%(add_format_template)s"/>
</form>
</td>
</tr>
</table>
''' % {'ln':ln,
       'add_format_template':_("Add New Format Template"),
       'extensive_checking':_("Check Format Templates Extensively")}
    return out
def tmpl_admin_output_formats_management(self, ln, output_formats):
    """
    Returns the main management console for output formats. Includes the
    list of output formats and associated administration tools
    (delete buttons, link to the "add" page).
    @param ln: language
    @param output_formats: a list of output formats
    @return: main management console as html
    """
    _ = gettext_set_language(ln)    # load the right message language

    # Top of the page and table header. The listing table has 7 columns:
    # row-stripe spacer, code, name, description, status, date, action.
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(manage_format_templates)s</a></small> </td>
<td>1. <small>%(manage_output_formats)s</small> </td>
<td>2. <small><a href="format_elements_doc?ln=%(ln)s">%(format_elements_documentation)s</a></small> </td>
</tr>
</table>
<p>From here you can add, edit or delete output formats available for collections. Output formats define which template to use. <br/>To edit templates go to the <a href="format_templates_manage?ln=%(ln)s">template administration page</a>.</p>
<table class="admin_wvar" width="95%%" cellspacing="0">
<tr>
<th class="adminheaderleft" > </th>
<th class="adminheaderleft" ><a href="output_formats_manage?ln=%(ln)s&sortby=code">%(code)s</a></th>
<th class="adminheaderleft" ><a href="output_formats_manage?ln=%(ln)s&sortby=name">%(name)s</a></th>
<th class="adminheaderleft" >%(description)s</th>
<th class="adminheaderleft" >%(status)s</th>
<th class="adminheaderleft" >%(last_modification_date)s</th>
<th class="adminheadercenter" >%(action)s [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#outputFormats">?</a>]</th>
</tr>
''' % {'code':_("Code"),
       'name':_("Name"),
       'description':_("Description"),
       'status':_("Status"),
       'last_modification_date':_("Last Modification Date"),
       'action':_("Action"),
       'ln':ln,
       'manage_output_formats':_("Manage Output Formats"),
       'manage_format_templates':_("Manage Format Templates"),
       'format_elements_documentation':_("Format Elements Documentation"),
       'menu': _("Menu"),
       'siteurl':CFG_SITE_URL}

    # Table content: format codes, names, descriptions and action buttons
    if not output_formats:
        # Placeholder row. It must span all 7 columns of the header above
        # (was colspan="5", which misaligned the empty-listing row).
        out += '''<tr>
<td colspan="7" class="admintd" align="center"><em>No format</em></td>
</tr>'''
    else:
        line = 0
        for output_format in output_formats:
            format_attributes = output_format['attrs']
            name = format_attributes['names']['generic']
            if name == "":
                name = " "
            description = format_attributes['description']
            if description == "":
                description = " "
            code = format_attributes['code']
            if code == "":
                code = " "
            last_mod_date = output_format['last_mod_date']
            status = output_format['status']
            disabled = ""
            if not output_format['editable']:
                disabled = 'disabled="disabled"'
            # Alternate the row background colour for readability
            style = "vertical-align: middle;"
            if line % 2:
                style = 'vertical-align: middle; background-color: rgb(235, 247, 255);'
            line += 1
            row_content = '''<tr>
<td class="admintdright" style="%(style)s"> </td>
<td class="admintdleft" style="white-space: nowrap; %(style)s">
<a href="output_format_show?bfo=%(code)s">%(code)s</a>
</td>
<td class="admintdleft" style="white-space: nowrap; %(style)s">
<a href="output_format_show?bfo=%(code)s">%(name)s</a>
</td>
<td class="admintdleft" style="%(style)s" >
%(description)s
</td>
<td class="admintd" style="white-space: nowrap; %(style)s" >%(status)s</td>
<td class="admintdleft" style="white-space: nowrap;%(style)s" >%(last_mod_date)s</td>
<td class="admintd" style="white-space: nowrap; %(style)s">
<form method="POST" action="output_format_delete?ln=%(ln)s&bfo=%(code)s">
<input class="adminbutton" type="submit" value="Delete" %(disabled)s />
</form>
</td>
</tr>
''' % {'style':style,
       'code':code,
       'description':description,
       'name':name,
       'ln':ln,
       'disabled':disabled,
       'last_mod_date':last_mod_date,
       'status':status}
            out += row_content

    # Table footer, "add" button and bottom of the page
    out += '''
<tr>
<td align="right" colspan="7">
<form method="GET" action="output_format_add?ln=%(ln)s">
<input class="adminbutton" type="submit" value="%(add_output_format)s"/>
</form>
</td>
</tr>
</table>
''' % {'ln':ln,
       'add_output_format':_("Add New Output Format")}
    return out
def tmpl_admin_output_format_show(self, ln, code, name, rules, default, format_templates, editable):
    """
    Returns the content of an output format.

    rules is an ordered list of dict (sorted by evaluation order),
    with keys 'field', 'value' and 'template'
    IMPORTANT: we display rules evaluation index starting at 1 in
    interface, but we start internally at 0
    @param ln: language
    @param code: the code of the output to show
    @param name: the name of this output format
    @param rules: the list of rules for this output format
    @param default: the default format template of the output format
    @param format_templates: the list of format_templates
    @param editable: True if we let user edit, else False
    @return: the management console for this output format
    """
    _ = gettext_set_language(ln)

    # Navigation menu
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="output_formats_manage?ln=%(ln)s">%(close_output_format)s</a></small> </td>
<td>1. <small>%(rules)s</small> </td>
<td>2. <small><a href="output_format_show_attributes?ln=%(ln)s&bfo=%(code)s">%(modify_output_format_attributes)s</a></small> </td>
<td>3. <small><a href="output_format_show_dependencies?ln=%(ln)s&bfo=%(code)s">%(check_dependencies)s</a></small> </td>
</tr>
</table>
<p>Define here the rules that specify which template to use for a given record.</p>
''' % {'code':code,
       'ln':ln,
       'menu':_("menu"),
       'close_output_format':_("Close Output Format"),
       'rules':_("Rules"),
       'modify_output_format_attributes':_("Modify Output Format Attributes"),
       'check_dependencies':_("Check Dependencies")
       }

    out += '''
<form name="rules" action="output_format_show?ln=%(ln)s&bfo=%(code)s" method="post">
<table>
<tr>
<td>
''' % {'ln': ln, 'code':code}

    # Controls are disabled/read-only when the format is not editable
    disabled = ""
    readonly = ""
    if not editable:
        disabled = 'disabled="disabled"'
        readonly = 'readonly="readonly"'

    if not rules:
        out += '''<p align="center"><em>No special rule</em></p>'''

    # One small table per rule, with up/down arrows to change priority
    # (displayed index starts at 1).
    line = 1
    for rule in rules:
        out += '''
<table align="center" class="admin_wvar" cellspacing="0">
<tr>
'''
        out += '''
<td rowspan="2" class="adminheader" style="vertical-align: middle;">'''
        if line > 1:
            # "Move up" arrow: only for rules below the first one
            out += '''
<input type="image" src="%(siteurl)s/img/smallup.gif" alt="Increase priority of rule %(row)s" name="+ %(row)s" value="+ %(row)s" %(disabled)s/></div>
''' % {'siteurl':CFG_SITE_URL, 'row':line, 'disabled':disabled}

        out += '''<div>%(row)s</div>''' % { 'row':line}

        if line < len(rules):
            # "Move down" arrow: only for rules above the last one
            out += '''
<input type="image" src="%(siteurl)s/img/smalldown.gif" alt="Decrease priority of rule %(row)s" name="- %(row)s" value="- %(row)s" %(disabled)s/>
''' % {'siteurl':CFG_SITE_URL,
       'row':line,
       'disabled':disabled}

        out += '''</td>
<td class="adminheaderleft"> </td>
'''
        out += '''
<td class="adminheaderleft" style="white-space: nowrap;">
Use template <select name="r_tpl" %(disabled)s>''' % {'disabled':disabled}

        for template in format_templates:
            attrs = format_templates[template]['attrs']
            attrs['template'] = template
            if template.endswith('.xsl') and not \
               attrs['name'].endswith(' (XSL)'):
                attrs['name'] += ' (XSL)'
            if template != rule['template']:
                out += '''<option value="%(template)s">%(name)s</option>''' % attrs
            else:
                out += '''<option value="%(template)s" selected="selected">%(name)s</option>''' % attrs

        if rule['template'] not in format_templates and rule['template'] != "":
            # Case where a non existing format template is used in output
            # format: we need to add it as option
            out += '''<option value="%s" selected="selected">%s</option>''' % (rule['template'],
                                                                               rule['template'])

        ################ FIXME remove when migration is done ####################
        #Let the user choose a non existing template, that is a placeholder
        #meaning that the template has not been migrated
        selected = ''
        if rule['template'] == 'migration_in_progress':
            selected = 'selected="selected"'
        if CFG_PATH_PHP or selected != '':
            out += '''<option disabled="disabled">For Migration:</option>'''
            out += '''<option value="migration_in_progress" %s>defined in old BibFormat</option>''' % selected
        ################ END FIXME ####################

        out += '''</select> if field
<input type="text" name="r_fld" value="%(field)s" size="10" %(readonly)s/> is equal to <input type="text" value="%(value)s" name="r_val" %(readonly)s/>
</td>
<td class="adminheaderright" style="vertical-align: middle;">
[<a href="%(siteurl)s/help/admin/bibformat-admin-guide#rulesOutputFormat">?</a>]
</td>
</tr>
''' % {'siteurl':CFG_SITE_URL,
       'field': rule['field'],
       'value':rule['value'],
       'readonly':readonly}

        out += '''
<tr>
<td colspan ="3" class="adminheaderright" style="vertical-align: middle; white-space: nowrap;">
<input type="submit" class="adminbutton" name="r_upd" value="%(remove_rule_label)s %(row)s" %(disabled)s/>
</td>
</tr>
</table>
''' % {'remove_rule_label': _("Remove Rule"),
       'row':line,
       'disabled':disabled}
        line += 1

    # Default template: used when no rule above matched
    out += '''
<table width="100%" align="center" class="admin_wvar" cellspacing="0">
<tr>
'''
    out += '''
<td width="30" class="adminheaderleft"> </td>
<td class="adminheaderleft">By default use <select id="default" name="default" %(disabled)s>''' % {'disabled':disabled}

    for template in format_templates:
        attrs = format_templates[template]['attrs']
        attrs['template'] = template
        if template.endswith('.xsl') and not \
           attrs['name'].endswith(' (XSL)'):
            attrs['name'] += ' (XSL)'
        if template != default:
            out += '''<option value="%(template)s">%(name)s</option>''' % attrs
        else:
            out += '''<option value="%(template)s" selected="selected">%(name)s</option>''' % attrs

    if default not in format_templates and default != "":
        # Case where a non existing format template is used in output
        # format: we need to add it as option (only if not empty string)
        out += '''<option value="%s" selected="selected">%s</option>''' % (default, default)

    ################ FIXME remove when migration is done ####################
    #Let the user choose a non existing template, that is a placeholder
    #meaning that the template has not been migrated
    selected = ''
    if default == 'migration_in_progress':
        selected = 'selected="selected"'
    if CFG_PATH_PHP or selected != '':
        out += '''<option disabled="disabled">For Migration:</option>'''
        out += '''<option value="migration_in_progress" %s>defined in old BibFormat</option>''' % selected
    ################ END FIXME ####################

    out += '''</select></td>
</tr>
</table>
<div align="right">
<input tabindex="6" class="adminbutton" type="submit" name="r_upd" value="%(add_new_rule_label)s" %(disabled)s/>
<input tabindex="7" class="adminbutton" type="submit" name="r_upd" value="%(save_changes_label)s" %(disabled)s/>
</div>
</td>
</tr>
</table>
</form>
''' % {'add_new_rule_label':_("Add New Rule"),
       'save_changes_label':_("Save Changes"),
       'disabled':disabled
       }
    return out
def tmpl_admin_output_format_show_attributes(self, ln,
                                             name,
                                             description,
                                             content_type,
                                             code,
                                             names_trans,
                                             editable,
                                             visible):
    """
    Returns a page to change output format name and description.

    names_trans is an ordered list of dicts with keys 'lang' and 'trans'
    @param ln: language
    @param name: the name of the format
    @param description: the description of the format
    @param code: the code of the format
    @param content_type: the (MIME) content type of the output format
    @param names_trans: the translations in the same order as the languages from get_languages()
    @param editable: True if we let user edit, else False
    @param visible: True if output format should be shown in list of available output formats
    @return: editor for output format attributes
    """
    _ = gettext_set_language(ln)    # load the right message language

    # Navigation menu
    out = ""
    out += '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="output_formats_manage?ln=%(ln)s">%(close_output_format)s</a></small> </td>
<td>1. <small><a href="output_format_show?ln=%(ln)s&bfo=%(code)s">%(rules)s</a></small> </td>
<td>2. <small>%(modify_output_format_attributes)s</small> </td>
<td>3. <small><a href="output_format_show_dependencies?ln=%(ln)s&bfo=%(code)s">%(check_dependencies)s</a></small> </td>
</tr>
</table><br/>
''' % {'ln':ln,
       'code':code,
       'close_output_format':_("Close Output Format"),
       'rules':_("Rules"),
       'modify_output_format_attributes':_("Modify Output Format Attributes"),
       'check_dependencies':_("Check Dependencies"),
       'menu':_("Menu")
       }

    # Inputs are read-only/disabled when the format is not editable
    disabled = ""
    readonly = ""
    if not editable:
        disabled = 'disabled="disabled"'
        readonly = 'readonly="readonly"'

    # Attribute edit form. NOTE: the "Content type" cells are now wrapped
    # in their own <tr>...</tr>; they previously sat outside any table row,
    # producing malformed HTML.
    out += '''
<form action="output_format_update_attributes?ln=%(ln)s&bfo=%(code)s" method="POST">
<table class="admin_wvar" cellspacing="0">
<tr>
<th colspan="2" class="adminheaderleft">
Output Format Attributes [<a href="%(siteurl)s/help/admin/bibformat-admin-guide#attrsOutputFormat">?</a>]</th>
</tr>
<tr>
<td class="admintdright"><label for="outputFormatCode">Code</label>: </td>
<td><input tabindex="0" name="code" type="text" id="outputFormatCode" maxlength="6" size="6" value="%(code)s" %(readonly)s/></td>
</tr>
<tr>
<td class="admintdright">Visibility: </td>
<td><input tabindex="1" name="visibility" type="checkbox" id="outputFormatVisibility" %(visibility)s %(disabled)s value="1" /><small><label for="outputFormatVisibility">Show in list of available output formats (on public pages)</label></small></td>
</tr>
<tr>
<td class="admintdright"><label for="outputFormatContentType">Content type</label>: </td>
<td><input tabindex="2" name="content_type" type="text" id="outputFormatContentType" size="25" value="%(content_type)s" %(readonly)s/> <small>Mime content-type. Specifies how the browser should handle this output.</small></td>
</tr>
<tr>
<td class="admintdright"><label for="outputFormatName">Name</label>: </td>
<td><input tabindex="3" name="name" type="text" id="outputFormatName" size="25" value="%(name)s" %(readonly)s/></td>
</tr>
''' % {'name': name,
       'ln':ln,
       'code':code,
       'content_type':content_type,
       'readonly':readonly,
       'siteurl':CFG_SITE_URL,
       'visibility': 'checked="checked"' if visible == 1 else '',
       'disabled':disabled}

    # Add translated names (tab indexes continue after the generic name)
    i = 3
    for name_trans in names_trans:
        i += 1
        out += '''
<tr>
<td class="admintdright"><label for="outputFormatName%(i)s">%(lang)s Name</label>: </td>
<td><input tabindex="%(i)s" name="names_trans" type="text" id="outputFormatName%(i)s" size="25" value="%(name)s" %(readonly)s/></td>
</tr>''' % {'name':name_trans['trans'],
            'lang':name_trans['lang'],
            'i':i,
            'readonly':readonly}

    # Description and end of page
    out += '''
<tr>
<td class="admintdright" valign="top"><label for="outputFormatDescription">Description</label>: </td>
<td><textarea tabindex="%(tabindexdesc)s" name="description" id="outputFormatDescription" rows="4" cols="25" %(readonly)s>%(description)s</textarea> </td>
</tr>
<tr>
<td colspan="2" align="right"><input tabindex="%(tabindexbutton)s" class="adminbutton" type="submit" value="Update Output Format Attributes" %(disabled)s/></td>
</tr>
</table>
</form>
''' % {'description': description,
       'tabindexdesc': i + 1,
       'tabindexbutton': i + 2,
       'readonly':readonly,
       'disabled':disabled}
    return out
def tmpl_admin_output_format_show_dependencies(self, ln, name, code, format_templates):
    """
    Shows the dependencies of the given format.
    @param ln: language
    @param name: the name of the output format
    @param code: the code of the output format
    @param format_templates: format templates that depend on this format (and also elements and tags)
    @return: HTML markup
    """
    _ = gettext_set_language(ln)    # load the right message language
    # Navigation menu plus the header of the three-column dependency table
    # (templates / elements / tags)
    out = '''
<table class="admin_wvar">
<tr><th colspan="4" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="output_formats_manage?ln=%(ln)s">%(close_output_format)s</a></small> </td>
<td>1. <small><a href="output_format_show?ln=%(ln)s&bfo=%(code)s">%(rules)s</a></small> </td>
<td>2. <small><a href="output_format_show_attributes?ln=%(ln)s&bfo=%(code)s">%(modify_output_format_attributes)s</a></small> </td>
<td>3. <small>%(check_dependencies)s</small> </td>
</tr>
</table><br/>
<table width="90%%" class="admin_wvar" cellspacing="0"><tr>
<th class="adminheaderleft">Format Templates that use %(name)s</th>
<th class="adminheaderleft">Format Elements used by %(name)s</th>
<th class="adminheaderleft">Tags Called*</th>
</tr>
''' % {'name': name,
       'code': code,
       'ln':ln,
       'close_output_format':_("Close Output Format"),
       'rules':_("Rules"),
       'modify_output_format_attributes':_("Modify Output Format Attributes"),
       'check_dependencies':_("Check Dependencies"),
       'menu': _("Menu")
       }
    if len(format_templates) == 0:
        # Placeholder spanning all three columns when nothing depends on us
        out += '''<tr><td colspan="3"><p align="center">
<i>This output format uses no format template.</i></p></td></tr>'''
    # One row per format template (first column), then one row per format
    # element it uses (second column), then one row per tag called by that
    # element (third column). NOTE: `name` and `filename` are deliberately
    # reused as loop variables here, shadowing the parameter values.
    for format_template in format_templates:
        name = format_template['name']
        filename = format_template['filename']
        out += '''<tr><td><a href="format_template_show?bft=%(filename)s&ln=%(ln)s">%(name)s</a></td>
<td> </td><td> </td></tr>''' % {'filename':filename,
                                'name':name,
                                'ln':ln}
        for format_element in format_template['elements']:
            name = format_element['name']
            filename = format_element['filename']
            # The anchor in the element documentation page is the
            # upper-cased element name.
            out += '''<tr><td> </td>
<td><a href="format_elements_doc?ln=%(ln)s#%(anchor)s">%(name)s</a></td>
<td> </td></tr>''' % {'anchor':name.upper(),
                      'name':name,
                      'ln':ln}
            for tag in format_element['tags']:
                out += '''<tr><td> </td><td> </td>
<td>%(tag)s</td></tr>''' % {'tag':tag}
    out += '''
</table>
<b>*Note</b>: Some tags linked with this format template might not be shown. Check manually.
'''
    return out
def tmpl_admin_format_elements_documentation(self, ln, format_elements):
    """
    Returns the main management console for format elements. Includes list of formats elements and
    associated administration tools.
    @param ln: language
    @param format_elements: a list of dictionaries with formats elements attributes
    @return: main management console as html
    """
    _ = gettext_set_language(ln)    # load the right message language
    #top of the page and table header
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="format_templates_manage?ln=%(ln)s">%(manage_format_templates)s</a></small> </td>
<td>1. <small><a href="output_formats_manage?ln=%(ln)s">%(manage_output_formats)s</a></small> </td>
<td>2. <small>%(format_elements_documentation)s</small> </td>
</tr>
</table>
<p>Here you can read the APIs of the formats elements, the elementary bricks for formats.</p>
''' % {'ln':ln,
       'menu': _("Menu"),
       'manage_output_formats':_("Manage Output Formats"),
       'manage_format_templates':_("Manage Format Templates"),
       'format_elements_documentation':_("Format Elements Documentation"),
       }
    #table content: formats names, description and actions
    if len(format_elements) == 0:
        out += '''
<em>No format elements found</em>
'''
    else:
        #Print summary of elements (name + decription)
        # Each summary row links to the detailed section below through the
        # "#<name>" anchor emitted by tmpl_admin_print_format_element_documentation.
        out += '''<h2>Summary table of elements</h2>'''
        out += '''<table width="90%">'''
        for format_element in format_elements:
            # format_attributes must provide the 'name' and 'description'
            # keys used by the %-template below.
            format_attributes = format_element['attrs']
            out += '''
<tr>
<td>
<code><a href="#%(name)s"><BFE_%(name)s/></a></code>
</td>
<td>
%(description)s
</td>
</tr>
''' % format_attributes
        out += "</table>"
        #Print details of elements
        out += '''<h2>Details of elements</h2>'''
        for format_element in format_elements:
            format_attributes = format_element['attrs']
            element_name = format_attributes['name']
            out += self.tmpl_admin_print_format_element_documentation(ln, element_name, format_attributes)
    #table footer, buttons and bottom of the page
    out += '''
<table align="center" width="95%">
</table>'''
    return out
def tmpl_admin_print_format_element_documentation(self, ln, name, attributes, print_see_also=True):
    """
    Prints the formatted documentation of a single element. Used in main documentation of element and
    in creation of floater for Dreamweaver.
    @param ln: language
    @param name: the name of the element
    @param attributes: the attributes of the element, as returned by get_format_element_attrs_from_*
    @param print_see_also: if True, prints links to other sections related to element
    @return: HTML markup
    """
    # Signature line: <BFE_NAME param="..." .../>
    params_names = ""
    for param in attributes['params']:
        params_names += "<b>"+param['name'] +'</b>="..." '

    out = '''
<a name="%(name)s"></a><h3>%(name)s</h3>
<b><BFE_%(name)s</b> %(params_names)s<b>/></b><br/><br/>
<em>%(description)s.</em><br/><br/>
<b>Parameters:</b><br/>
''' % {'params_names': params_names,
       'name':name,
       'description': attributes['description']}

    # Declared parameters first, then builtin ones; both kinds are rendered
    # identically (name, description, optional escaped default value), so a
    # single loop over the concatenation replaces the former duplicated loops.
    for param in attributes['params'] + attributes['builtin_params']:
        out += '''
<code>%(name)s</code> - %(description)s. ''' % param
        if param['default'] != "":
            default = cgi.escape(str(param['default']))
            if default.strip() == "":
                # Make an all-whitespace default visible in the HTML output
                default = " "
            out += '''
Default value is «<code>%s</code>»
''' % default
        out += '<br/>'

    if print_see_also:
        out += '''<br/>
<b>See also:</b><br/>'''
        for element in attributes['seealso']:
            # Anchor is the upper-cased module name without extension
            element_name = element.split('.')[0].upper()
            out += '''
<a href="#%(name)s">Element <em>%(name)s</em></a><br/>''' % {'name':element_name}
        out += '''
<a href ="format_element_show_dependencies?ln=%(ln)s&bfe=%(bfe)s">Dependencies of this element</a><br/>
<a href ="validate_format?ln=%(ln)s&bfe=%(bfe)s">The correctness of this element</a><br/>
<a href ="format_element_test?ln=%(ln)s&bfe=%(bfe)s">Test this element</a><br/>
''' % {'ln':ln, 'bfe':name}
    return out
def tmpl_admin_format_element_show_dependencies(self, ln, name, format_templates, tags):
    """
    Shows the dependencies of the given format element.
    @param ln: language
    @param name: the name of the element
    @param format_templates: format templates that depend on this element
    @param tags: the tags that are called by this format element
    @return: HTML markup
    """
    # Back-link to the element documentation, then a two-column table:
    # templates using the element on the left, called tags on the right.
    pieces = ['''
<p>Go back to <a href="format_elements_doc?ln=%(ln)s#%(name)s">documentation</a></p>
''' % {'ln': ln, 'name': name.upper()}]
    pieces.append(''' <table width="90%" class="admin_wvar" cellspacing="0"><tr>''')
    pieces.append('''
<th class="adminheaderleft">Format Templates that use %(name)s</th>
<th class="adminheaderleft">Tags Called*</th>
</tr>
<tr>
<td> <br/>''' % {"name": name})
    # Left column: linked format templates, or a placeholder note
    if not format_templates:
        pieces.append('''<p align="center">
<i>This format element is not used in any format template.</i></p>''')
    for template in format_templates:
        pieces.append('''<a href="format_template_show?ln=%(ln)s&bft=%(filename)s">%(name)s</a><br/>'''
                      % {'filename': template['filename'],
                         'name': template['name'],
                         'ln': ln})
    # Right column: tags, or a placeholder note
    pieces.append("</td><td> <br/>")
    if not tags:
        pieces.append('''<p align="center">
<i>This format element uses no tag.</i></p>''')
    for tag in tags:
        pieces.append('''%(tag)s<br/>''' % {'tag': tag})
    pieces.append('''
</td>
</tr>
</table>
<b>*Note</b>: Some tags linked with this format template might not be shown. Check manually.
''')
    return ''.join(pieces)
def tmpl_admin_format_element_test(self, ln, bfe, description, param_names, param_values, param_descriptions, result):
    """
    Prints a page where the user can test the given format element
    with his own parameters.
    @param ln: language
    @param bfe: the format element name
    @param description: a description of the element
    @param param_names: a list of parameters names/labels
    @param param_values: a list of values for parameters
    @param param_descriptions: a list of description for parameters
    @param result: the result of the evaluation
    @return: HTML markup
    """
    pieces = []
    # Back-link to the documentation section of this element
    pieces.append('''
<p>Go back to <a href="format_elements_doc?ln=%(ln)s#%(name)s">documentation</a></p>
''' % {'ln': ln, 'name': bfe.upper()})
    # Element header, its description and the opening of the test form
    pieces.append('''
<h3><BFE_%(bfe)s /></h3>
<p>%(description)s</p>
<table width="100%%"><tr><td>
<form method="post" action="format_element_test?ln=%(ln)s&bfe=%(bfe)s">
<table>
''' % {'bfe': bfe, 'ln': ln, 'description': description})
    # One editable row per parameter; names/values are HTML-escaped.
    # The three lists are parallel (same length by contract).
    for p_name, p_value, p_descr in zip(param_names, param_values, param_descriptions):
        pieces.append('''
<tr>
<td class="admintdright">%(name)s</td>
<td class="admintdright"><input type="text" name="param_values" value="%(value)s"/></td>
<td class="admintdleft">%(description)s </td>
</tr>
''' % {'name': cgi.escape(p_name),
       'value': cgi.escape(p_value, quote=True),
       'description': p_descr})
    # Submit button and the result of the last evaluation
    pieces.append('''
<tr><td colspan="2" class="admintdright"><input type="submit" class="adminbutton" value="Test!"/></td>
<td> </td>
</tr>
</table>
</form>
<fieldset style="display:inline;margin-left:auto;margin-right:auto;">
<legend>Result:</legend>%(result)s</fieldset>
''' % {'result': result})
    pieces.append('''
</td></tr><tr><td>
''')
    # NOTE: the element documentation used to be embedded here via
    # self.tmpl_admin_print_format_element_documentation(ln, bfe, attributes, False)
    pieces.append('''</td></tr></table>''')
    return ''.join(pieces)
def tmpl_admin_add_format_element(self, ln):
    """
    Shows how to add a format element (mainly documentation).
    @param ln: language
    @return: HTML markup
    """
    _ = gettext_set_language(ln)    # load the right message language
    # Purely informational page: two paragraphs of instructions
    return '''
<p>To add a new basic element (only fetch the value of a field, without special post-processing), go to the <a href="%(siteurl)sadmin/bibindex/bibindexadmin.py/field">BibEdit "Manage Logical Fields"</a> page and add a name for a field. Make sure that the name is unique and corresponds well to the field. For example, to add an element that fetch the value of field 245__%%, add a new logical field with name "title" and field "245__%%". Then in your template, call BFE_TITLE to print the title.</p>
<p>To add a new complex element (for eg. special formatting of the field, condition on the value, etc.) you must go to the lib/python/invenio/bibformat_elements directory of your Invenio installation, and add a new format element file. Read documentation for more information.</p>
''' % {'siteurl': CFG_SITE_URL}
def tmpl_dreamweaver_floater(self, ln, format_elements):
    """
    Returns the content of the BibFormat palette for Dreamweaver. This
    'floater' lets Dreamweaver users insert format elements into their
    code right from the floating panel.
    @param ln: language
    @param format_elements: an ordered list of format elements structures as returned by get_format_elements
    @return: HTML markup (according to Dreamweaver specs)
    """
    element_names = []   # element names, e.g. ['Authors', 'Title']
    element_codes = []   # insertable snippets, e.g. '<BFE_AUTHORS limit="" separator="," />'
    element_docs = []    # HTML documentation snippet for each element
    for element in format_elements:
        attrs = element['attrs']
        element_name = attrs['name']
        # Build the insertable tag: builtin parameters first, then the
        # declared ones, each shown with its default value.
        built_in = [p['name'] + '="' + str(p['default']) + '"' for p in attrs['builtin_params']]
        declared = [p['name'] + '="' + str(p['default']) + '"' for p in attrs['params']]
        snippet = "<BFE_" + element_name + ' ' + ' '.join(built_in) + ' ' + ' '.join(declared) + "/>"
        # Escape single quotes: the snippets end up inside JS string literals
        element_codes.append(snippet.replace("'", r"\'"))
        help_html = self.tmpl_admin_print_format_element_documentation(ln, element_name, attrs,
                                                                       print_see_also=False)
        element_docs.append(help_html.replace("'", r"\'"))
        element_names.append(element_name)
    # Assemble the floater page; %(docs)s / %(codes)s become JS arrays and
    # %(names)s the <option> list (newlines escaped for the JS literals).
    return '''
<!DOCTYPE HTML SYSTEM "-//Macromedia//DWExtension layout-engine5.0//floater">
<html>
<head>
<!-- This file is to be used as floating panel for Dreamweaver.
To install, drag and drop inside /Configuration/Floaters of your Dreamweaver
application directory. You also have to enable a menu to open the floater:
Edit file Menu.xml located inside /Configuration/Menus of your Dreamweaver
application directory and copy-paste the following line in the menu you want
(typically inside tag 'menu' with attribute id = 'DWMenu_Window_Others'):
<menuitem name="BibFormat Elements" enabled="true" command="dw.toggleFloater('BibFormat_floater.html')" checked="dw.getFloaterVisibility('BibFormat_floater.html')" />
-->
<title>BibFormat Elements</title>
<script language="JavaScript">
var docs = new Array(%(docs)s);
var codes = new Array(%(codes)s);
function selectionChanged(){
// get the selected node
var theDOM = dw.getDocumentDOM();
var theNode = theDOM.getSelectedNode();
// check if node is a BibFormat Element
if (theNode.nodeType == Node.COMMENT_NODE && theNode.data.length >= 5 && theNode.data.toLowerCase().substring(0,5) == "<bfe_"){
var names = document.elementsList.options;
for (i=0;i<names.length; i++){
if (names[i].text.toLowerCase() == theNode.data.split(' ')[0].toLowerCase() ||
names[i].text.toLowerCase() == theNode.data.split(' ')[0].toLowerCase().substring(5,theNode.data.length)){
document.elementsList.selectedIndex = i;
selectElement(document.elementsList);
return;
}
}
}
}
function isAvailableInCodeView(){
return true;
}
function selectElement(elementsList){
document.infoBFE.innerHTML = docs[elementsList.selectedIndex];
}
function insertElement(){
// insert selection into code
var element_code = codes[document.elementsList.selectedIndex];
// get the DOM
var theDOM = dw.getDocumentDOM();
var theDocEl = theDOM.documentElement;
var theWholeDoc = theDocEl.outerHTML;
// Get the offsets of the selection
var theSel = theDOM.getSelection();
theDocEl.outerHTML = theWholeDoc.substring(0,theSel[0]) + element_code + theWholeDoc.substring(theSel[1]);
}
</script>
</head>
<body>
<table width="100%%" border="0" cellspacing="0" cellpadding="3">
<tr>
<td valign="top">
<select name="elementsList" id="elementsList" size="15" onChange="selectElement(this)">
%(names)s
</select><br/>
<input type="submit" name="Submit" value="Insert" onClick="insertElement()">
</td>
<td valign="top" width="100%%">
<div id="infoBFE">
<center>No Format Element selected. Select one from the list on the right.</center>
</div>
</td>
</tr>
</table>
</body>
</html>
''' % {'docs': ', '.join(["'" + x + "'" for x in element_docs]).replace('\n', '\\n'),
       'codes': ', '.join(["'" + x + "'" for x in element_codes]).replace('\n', '\\n'),
       'names': '\n'.join(['<option value="' + x + '">' + x + '</option>' for x in element_names])}
def tmpl_admin_validate_format(self, ln, errors):
    """
    Prints the errors of the validation of a format (might be any
    kind of format).
    @param ln: language
    @param errors: a list of error messages (HTML-ready strings; they are
                   concatenated directly into the output)
    @return: HTML markup
    """
    _ = gettext_set_language(ln)    # load the right message language
    # No error: a single green confirmation span, nothing else to list
    if len(errors) == 0:
        return '''<span style="color: rgb(0, 255, 0);" >%s.</span>''' % _('No problem found with format')
    # Otherwise a red headline (singular/plural) followed by the errors
    if len(errors) == 1:
        headline = _('An error has been found')
    else:
        headline = _('The following errors have been found')
    out = '''<span style="color: rgb(255, 0, 0);" >%s:</span><br/>''' % headline
    for error in errors:
        out += error + "<br/>"
    return out
def tmpl_admin_dialog_box(self, url, ln, title, message, options):
    """
    Prints a dialog box with the given title, message and options.
    @param url: the url of the page that must process the result of the dialog box
    @param ln: language (currently not used in the generated markup)
    @param title: the title of the dialog box
    @param message: a formatted message to display inside dialog box
    @param options: a list of string options to display as button to the user
    @return: HTML markup
    """
    # One submit button per option; the chosen value is posted back
    # under the name "chosen_option".
    buttons = ''.join(
        '''<input type="submit" class="adminbutton" name="chosen_option" value="%(value)s" /> ''' % {'value': option}
        for option in options)
    header = '''
<div style="text-align:center;">
<fieldset style="display:inline;margin-left:auto;margin-right:auto;">
<legend>%(title)s:</legend>
<p>%(message)s</p>
<form method="post" action="%(url)s">
''' % {'title': title,
       'message': message,
       'url': url}
    return header + buttons + '''</form></fieldset></div>'''
| gpl-2.0 |
stormrose-va/xobox | tests/t_cli/test_logger.py | 1 | 1540 | # -*- coding: utf-8 -*-
"""
tests.t_cli.test_logger
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by the Stormrose Project team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import os
import sys
import tempfile
from unittest import TestCase, skipIf
from xobox.cli.logger import Logger
class TestXoboxCliLogger(TestCase):
    """
    Unit tests for :py:mod:`xobox.cli.logger`
    """

    @skipIf(sys.version_info < (3, 0, 0), "Singleton instance recognition only works in Python 3")
    def test_01(self):
        """
        Test Case 01:
        Try getting an instance of :py:class:`~xobox.cli.logger.Logger`.

        Test is passed if instance is an instance of :py:class:`~xobox.cli.logger.Logger`.
        """
        obj = Logger.get_instance()
        self.assertIsInstance(obj, Logger)

    def test_02(self):
        """
        Test Case 02:
        Test logger by logging a test message into a file.

        Test is passed if file content meets expectation.
        """
        fd, name = tempfile.mkstemp()
        os.close(fd)  # mkstemp opens the file; we only need its path
        logger = Logger.get_instance()
        logger.file = name
        logger.type = 'file'
        logger.log_error("This is a tests message")
        logger.type = 'term'  # restore terminal logging for later tests
        with open(name, 'r') as fp:
            content = fp.read()
        os.unlink(name)
        # assertRegexpMatches was deprecated in Python 3.2 and removed in
        # Python 3.12; assertRegex is the supported spelling.
        self.assertRegex(
            content,
            r'^\[\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2}\] \[ERROR\] This is a tests message$'
        )
| mit |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/ec2containerservice/__init__.py | 99 | 1122 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| bsd-3-clause |
CORDEA/analysis_of_1000genomes-data | programs/add_annotations/missense.py | 1 | 1346 | #!/bin/env python
# encoding:utf-8
#
# Author: CORDEA
# Created: 2014-09-12
#
# Tally, per sample column, how often each genotype code (0-3) occurs in the
# missense VCF extract, then write per-sample and overall percentages.
infile = open("ilm_missene.onmis.vcf", "r")
lines = infile.readlines()
infile.close()

# msDict maps sample index -> [count of code 0, count of 1, count of 2, count of 3].
msDict = {}
for line in lines:
    # Field 6 (comma-separated) holds the '|'-separated genotype codes.
    samples = line.split(",")[5]
    tmp = samples.split("|")
    for i in range(len(tmp)):
        try:
            msDict[i][int(tmp[i])] += 1
        except:
            # First occurrence of this sample index: initialise its counters.
            # NOTE(review): the bare except also hides ValueError from
            # malformed fields -- consider catching KeyError explicitly.
            msDict[i] = [0, 0, 0, 0]
            msDict[i][int(tmp[i])] += 1

# NOTE(review): outFile is never closed; rely on interpreter exit or add close().
outFile = open("missene.snp_freq", "w")
all = [0,0,0,0]
for k, v in msDict.items():
    outFile.write(str(k) + ",")
    # Denominator excludes code 0 (presumably the reference genotype) --
    # TODO confirm against the VCF encoding.
    oSUM = v[1] + v[2] + v[3]
    # NOTE(review): the first and third percentage columns both write v[1];
    # verify this duplication is intentional.
    outFile.write(str(round( (v[1]/float(oSUM))*100, 2)) + ",")
    outFile.write(str(round( ((v[2]+v[3])/float(oSUM))*100, 2)) + ",")
    outFile.write(str(round( (v[1]/float(oSUM))*100, 2)) + ",")
    outFile.write(str(round( (v[2]/float(oSUM))*100, 2)) + ",")
    outFile.write(str(round( (v[3]/float(oSUM))*100, 2)) + "\n")
    all[0] += v[0]
    all[1] += v[1]
    all[2] += v[2]
    all[3] += v[3]

# Final row: the same percentage columns aggregated over all samples.
oSUM = all[1] + all[2] + all[3]
outFile.write(str(round( (all[1]/float(oSUM))*100, 2)) + ",")
outFile.write(str(round( ((all[2]+all[3])/float(oSUM))*100, 2)) + ",")
outFile.write(str(round( (all[1]/float(oSUM))*100, 2)) + ",")
outFile.write(str(round( (all[2]/float(oSUM))*100, 2)) + ",")
outFile.write(str(round( (all[3]/float(oSUM))*100, 2)) + "\n")
| apache-2.0 |
Saviio/ClawMonitor | urls.py | 1 | 1245 | from utils import current_random, current, timestring
import urllib, time
class urls():
    """
    Static builders for Xunlei/Lixian web API endpoint URLs.

    Every method returns a fully-formed URL string.  Cache-busting query
    parameters are produced by the ``current``, ``current_random`` and
    ``timestring`` helpers imported from ``utils`` at module level.
    """

    def __init__(self):
        # Bug fix: the original signature was ``def __init__():`` (missing
        # ``self``), so ``urls()`` raised TypeError on instantiation even
        # though the class is otherwise usable via its static methods.
        pass

    @staticmethod
    def verify_usr(usr):
        """Return the account-check URL for user name ``usr``."""
        return "http://login.xunlei.com/check/?u=%s&business_type=108&cachetime=%d&" % (usr, current())

    @staticmethod
    def login():
        """Return the login endpoint (static URL, no parameters)."""
        return "http://login.xunlei.com/sec2login/"

    @staticmethod
    def main():
        """Return the post-login landing URL with a cache-busting timestamp."""
        return "http://dynamic.lixian.vip.xunlei.com/login?cachetime=%d&from=0" % current()

    @staticmethod
    def task_check(url):
        """Return the task-check URL for the (percent-encoded) download ``url``."""
        return "http://dynamic.cloud.vip.xunlei.com/interface/task_check?callback=queryCid&url=%s&random=%s&tcache=%s" % (urllib.quote(url), current_random(), current())

    @staticmethod
    def task_commit():
        """Return the task-commit endpoint (static URL, no parameters)."""
        return "http://dynamic.cloud.vip.xunlei.com/interface/task_commit"

    @staticmethod
    def task_list(num):
        """Return the task-list URL requesting ``num`` tasks from page 1."""
        timestamp = int(time.mktime(time.localtime()))
        return ("http://dynamic.cloud.vip.xunlei.com/interface/showtask_unfresh?callback=jsonp%s&t=%s&type_id=4&page=1&tasknum=%s&p=1&interfrom=task" % (timestamp, timestring(), num)).replace(' ','%20')

    @staticmethod
    def verify_img():
        """Return the captcha image URL with a cache-busting timestamp."""
        return "http://verify2.xunlei.com/image?t=MVA&cachetime=%s" % current()
Balance-Breaker/Algo_Ds_Notes | Dijkstra_Algorithm/Dijkstra_Algorithm.py | 4 | 3700 | '''
Dijkstra's algorithm for weighted undirected graph
'''
from collections import deque
class Dijkstra:
    """
    Dijkstra's shortest-path algorithm over a weighted undirected graph.

    The graph is supplied as an adjacency mapping
    ``{vertex: {neighbour: edge_weight, ...}, ...}``.  A hand-rolled binary
    min-heap backed by a deque is used as the priority queue; its entries
    are single-item ``{vertex: cost}`` dicts.

    NOTE(review): the heap helpers use 1-based index arithmetic and rely on
    Python 2 integer division (``i/2``); this class is Python 2 only (it
    also uses ``dict.iteritems`` and compares ``dict.values()`` results).
    """
    def __init__(self, graph):
        self.vertex_visited = list()    # vertices already finalised
        self.distance = {}              # vertex -> best known distance
        self.graph = graph
        self.source = None
        self.queue_size = 0             # number of live heap entries (1-based)
        self.min_queue = deque()        # heap storage; slot 0 acts as a sentinel

    def initialise(self):
        """Reset per-run state and seed the queue with the source's edges."""
        self.vertex_visited = list()
        self.distance = {}
        # Initialise vertex cost: 0 for the source, infinity elsewhere.
        for k,v in self.graph.iteritems():
            if k == self.source:
                self.distance.update({k:0})
            else:
                self.distance.update({k:float('inf')})
        # Push the source's direct neighbours with their edge costs.
        for k,v in self.graph[self.source].iteritems():
            self.priorityQueue({k:v})

    def priorityQueue(self,weight):
        """Push a ``{vertex: cost}`` entry and restore the heap property."""
        self.min_queue.append(weight)
        self.queue_size = self.queue_size + 1
        self.heapify(self.queue_size)

    def heapify(self,i):
        """Sift the entry at 1-based index ``i`` up towards the root."""
        while i/2 > 0:
            if self.min_queue[i].values() <= self.min_queue[i/2].values():
                temp = self.min_queue[i]
                self.min_queue[i] = self.min_queue[i/2]
                self.min_queue[i/2] = temp
            i = i/2

    def del_min(self):
        """Pop and return the minimum-cost entry (heap root at index 1)."""
        popped = self.min_queue[1]
        self.min_queue[1] = self.min_queue[self.queue_size] # Move last element to the root slot
        self.queue_size = self.queue_size - 1;
        self.min_queue.pop()
        self.re_heapify(1)
        return popped

    def re_heapify(self, i):
        """Sift the entry at index ``i`` down to its correct position."""
        while 2 * i <= self.queue_size:
            mc = self.min_node(i)
            if self.min_queue[mc].values() < self.min_queue[i].values():
                temp = self.min_queue[i]
                self.min_queue[i] = self.min_queue[mc]
                self.min_queue[mc] = temp
            i = mc

    def min_node(self, i):
        """Return the index of the smaller child of the node at index ``i``."""
        if (2 * i + 1) > self.queue_size:
            return 2 * i;
        else:
            if self.min_queue[2 * i].values() < self.min_queue[2 * i + 1].values():
                return 2 * i
            else:
                return 2 * i +1

    def minDistance(self, source):
        """Return a dict of shortest distances from ``source`` to every vertex."""
        self.source = source
        self.min_queue.append({self.source:0}) # Sentinel: source entry fills deque slot 0
        self.initialise() # Reset values for new source
        while len(self.min_queue) > 1:
            vertex = self.del_min() # Pop the minimum-distance vertex from the priority queue
            if vertex not in self.vertex_visited:
                self.vertex_visited.append(vertex)
                for parentNode, parentCost in vertex.iteritems():
                    for adjVertex, adjCost in self.graph[parentNode].iteritems():
                        if adjVertex not in self.distance:
                            self.distance.update({adjVertex:adjCost})
                        else:
                            # Relax the edge if this path is shorter.
                            if self.distance[adjVertex] > (self.distance[parentNode] + adjCost):
                                self.distance[adjVertex] = self.distance[parentNode] + adjCost
                                self.priorityQueue({adjVertex:adjCost}) # Re-queue with the new cost
        return self.distance
# Demo driver: weighted undirected graph stored as an adjacency map,
# followed by two sample runs (Python 2 print statements).
g = { 'A': {'C': 9, 'B': 7, 'F': 14},
      'B': {'A': 7, 'C': 10, 'D': 15},
      'C': {'A': 9, 'B': 10, 'D': 11, 'F': 2},
      'D': {'E': 6, 'B': 15, 'C': 11},
      'E': {'F': 9, 'D': 6},
      'F': {'C': 2, 'A': 14, 'E': 9}
    }

dijkstra = Dijkstra(g)
print dijkstra.minDistance('A')
print dijkstra.minDistance('E')

'''
Output
-------
{'A': 0, 'C': 9, 'B': 7, 'E': 20, 'D': 20, 'F': 11}
{'A': 26, 'C': 17, 'B': 21, 'E': 0, 'D': 6, 'F': 9}
'''
| gpl-3.0 |
blx/pyjade | examples/django_example/settings.py | 9 | 5251 | # Django settings for django_prueba project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_r4d7*&$@r18hy7w=pi!%97nha-_!k$#+y%go1blia6u%gs$&l'
# List of callables that know how to import templates from various sources.
# pyjade's Loader wraps the standard Django loaders so that Jade templates
# are compiled to Django template syntax before being handed to the wrapped
# filesystem/app-directory loaders.
TEMPLATE_LOADERS = (
    ('pyjade.ext.django.Loader',(
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)
# Default (non-pyjade) loader configuration, kept for reference:
# TEMPLATE_LOADERS = (
#     'django.template.loaders.filesystem.Loader',
#     'django.template.loaders.app_directories.Loader',
#     # 'django.template.loaders.eggs.Loader',
# )
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'django_example.urls'
TEMPLATE_DIRS = (
'templates/'
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit |
tzmgit/fast-api-ui | fast_api_ui/database.py | 28 | 2310 | # -*- coding: utf-8 -*-
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from sqlalchemy.orm import relationship
from .compat import basestring
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = relationship
class CRUDMixin(object):
    """Mixin that adds convenience methods for CRUD (create, read, update, delete) operations."""

    @classmethod
    def create(cls, **kwargs):
        """Create a new record and save it to the database; returns the instance."""
        instance = cls(**kwargs)
        return instance.save()

    def update(self, commit=True, **kwargs):
        """Update specific fields of a record; returns the record itself."""
        for attr, value in kwargs.items():
            setattr(self, attr, value)
        # and/or idiom: save() returns self (truthy), so this evaluates to
        # self whether or not a commit was requested.
        return commit and self.save() or self

    def save(self, commit=True):
        """Save the record (add to session, optionally commit); returns ``self``."""
        db.session.add(self)
        if commit:
            db.session.commit()
        return self

    def delete(self, commit=True):
        """Remove the record from the database.

        Returns the result of ``db.session.commit()`` when ``commit`` is
        true, otherwise ``False``.
        """
        db.session.delete(self)
        return commit and db.session.commit()
class Model(CRUDMixin, db.Model):
    """Base model class that includes CRUD convenience methods."""

    # Abstract base: SQLAlchemy creates no table for this class itself.
    __abstract__ = True
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
    """A mixin that adds a surrogate integer 'primary key' column named ``id`` to any declarative-mapped class."""

    # Allow subclasses to redefine/extend the table without conflicts.
    __table_args__ = {'extend_existing': True}

    id = db.Column(db.Integer, primary_key=True)

    @classmethod
    def get_by_id(cls, record_id):
        """Get record by ID.

        Accepts an int, a float, or a string of digits; returns ``None``
        for any other input instead of raising.
        """
        if any(
                (isinstance(record_id, basestring) and record_id.isdigit(),
                 isinstance(record_id, (int, float))),
        ):
            return cls.query.get(int(record_id))
        return None
def reference_col(tablename, nullable=False, pk_name='id', **kwargs):
    """Column that adds primary key foreign key reference.

    :param tablename: name of the referenced table
    :param nullable: whether the foreign key column may be NULL
    :param pk_name: name of the referenced primary-key column
    :param kwargs: passed through to ``db.Column``

    Usage: ::

        category_id = reference_col('category')
        category = relationship('Category', backref='categories')
    """
    return db.Column(
        db.ForeignKey('{0}.{1}'.format(tablename, pk_name)),
        nullable=nullable, **kwargs)
| bsd-3-clause |
Chilledheart/chromium | third_party/closure_linter/closure_linter/common/tokens_test.py | 126 | 3089 | #!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
    """Return a placeholder Token ('foo', no type, line 1)."""
    return tokens.Token('foo', None, 1, 1)
def _CreateDummyTokens(count):
    """Return a list of ``count`` independent, unlinked dummy tokens."""
    dummy_tokens = []
    for _ in xrange(count):
        dummy_tokens.append(_CreateDummyToken())
    return dummy_tokens
def _SetTokensAsNeighbors(neighbor_tokens):
    """Link the given tokens into a doubly-linked previous/next chain, in order."""
    for i in xrange(len(neighbor_tokens)):
        prev_index = i - 1
        next_index = i + 1

        if prev_index >= 0:
            neighbor_tokens[i].previous = neighbor_tokens[prev_index]

        if next_index < len(neighbor_tokens):
            neighbor_tokens[i].next = neighbor_tokens[next_index]
class TokensTest(googletest.TestCase):
    """Unit tests for Token line/type predicates and neighbour iteration."""

    def testIsFirstInLine(self):

        # First token in file (has no previous).
        self.assertTrue(_CreateDummyToken().IsFirstInLine())

        a, b = _CreateDummyTokens(2)
        _SetTokensAsNeighbors([a, b])

        # Tokens on same line
        a.line_number = 30
        b.line_number = 30
        self.assertFalse(b.IsFirstInLine())

        # Tokens on different lines
        b.line_number = 31
        self.assertTrue(b.IsFirstInLine())

    def testIsLastInLine(self):
        # Last token in file (has no next).
        self.assertTrue(_CreateDummyToken().IsLastInLine())

        a, b = _CreateDummyTokens(2)
        _SetTokensAsNeighbors([a, b])

        # Tokens on same line
        a.line_number = 30
        b.line_number = 30
        self.assertFalse(a.IsLastInLine())

        b.line_number = 31
        self.assertTrue(a.IsLastInLine())

    def testIsType(self):
        a = tokens.Token('foo', 'fakeType1', 1, 1)
        self.assertTrue(a.IsType('fakeType1'))
        self.assertFalse(a.IsType('fakeType2'))

    def testIsAnyType(self):
        a = tokens.Token('foo', 'fakeType1', 1, 1)
        self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
        self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))

    def testRepr(self):
        a = tokens.Token('foo', 'fakeType1', 1, 1)
        self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))

    def testIter(self):
        # Forward iteration follows the .next chain from the start token.
        dummy_tokens = _CreateDummyTokens(5)
        _SetTokensAsNeighbors(dummy_tokens)
        a, b, c, d, e = dummy_tokens

        i = iter(a)
        self.assertListEqual([a, b, c, d, e], list(i))

    def testReverseIter(self):
        # Reverse iteration follows the .previous chain from the end token.
        dummy_tokens = _CreateDummyTokens(5)
        _SetTokensAsNeighbors(dummy_tokens)
        a, b, c, d, e = dummy_tokens

        ri = reversed(e)
        self.assertListEqual([e, d, c, b, a], list(ri))
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
oleksa-pavlenko/gae-django-project-template | django/views/i18n.py | 68 | 11000 | import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.apps import apps
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language, LANGUAGE_SESSION_KEY
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    next = request.POST.get('next', request.GET.get('next'))
    # Guard against open redirects: fall back to the referer, then to '/'.
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'
    response = http.HttpResponseRedirect(next)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            # Prefer the session when the session middleware is active;
            # otherwise persist the choice in a cookie on the redirect.
            if hasattr(request, 'session'):
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code,
                                    max_age=settings.LANGUAGE_COOKIE_AGE,
                                    path=settings.LANGUAGE_COOKIE_PATH,
                                    domain=settings.LANGUAGE_COOKIE_DOMAIN)
    return response
def get_formats():
    """
    Returns all formats strings required for i18n to work
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    result = {}
    # NOTE(review): the loop variable `module` is never used inside the
    # body -- get_format() already resolves against the format modules, so
    # the outer loop just repeats the same assignments.  Confirm before
    # simplifying.
    for module in [settings] + get_format_modules(reverse=True):
        for attr in FORMAT_SETTINGS:
            result[attr] = get_format(attr)
    formats = {}
    # Keep only JSON-serialisable values: strings/ints and lists/tuples of
    # them, coerced to text for the JS side.
    for k, v in result.items():
        if isinstance(v, (six.string_types, int)):
            formats[k] = smart_text(v)
        elif isinstance(v, (tuple, list)):
            formats[k] = [smart_text(value) for value in v]
    return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
    """Render the JavaScript i18n catalog template to an HttpResponse.

    :param catalog: msgid -> translation mapping, or ``None`` for the
        identity ("null") catalog
    :param plural: JavaScript plural-form expression string, if any
    :returns: ``HttpResponse`` with content type ``text/javascript``
    """
    template = Template(js_catalog_template)

    def indent(s):
        # Indent the serialized JSON so it nests under its assignment in
        # the rendered template (was an assigned lambda; PEP 8 E731).
        return s.replace('\n', '\n  ')

    context = Context({
        'catalog_str': indent(json.dumps(
            catalog, sort_keys=True, indent=2)) if catalog else None,
        'formats_str': indent(json.dumps(
            get_formats(), sort_keys=True, indent=2)),
        'plural': plural,
    })

    return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
    """Collect the gettext catalog and plural rule for the JS catalog view.

    :param locale: locale name to load (e.g. ``'de_DE'``)
    :param domain: gettext domain (usually ``'djangojs'``)
    :param packages: candidate package names; restricted to installed apps
        plus ``'django.conf'``
    :returns: tuple ``(catalog dict, plural expression or None)``
    """
    default_locale = to_locale(settings.LANGUAGE_CODE)
    app_configs = apps.get_app_configs()
    allowable_packages = set(app_config.name for app_config in app_configs)
    allowable_packages.add('django.conf')
    packages = [p for p in packages if p in allowable_packages]
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        # The metadata entry (empty msgid) may carry the plural rule.
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # Bug fix: size each plural list by its own msgid's maximum plural
        # index (maxcnts[k]).  The original read the stale loop variable
        # `msgid` left over from the previous loop, so every msgid's plural
        # list was sized after whichever tuple key happened to be processed
        # last.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
    """
    Returns "identity" versions of the JavaScript i18n functions -- i.e.,
    versions that don't actually do anything.
    """
    # No catalog and no plural rule: the template then emits pass-through
    # gettext stubs.  `domain` and `packages` are accepted only for URLconf
    # compatibility with javascript_catalog.
    return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.

    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.

    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    locale = to_locale(get_language())

    # An explicit, valid ?language= query parameter overrides the active
    # language for this response.
    if request.GET and 'language' in request.GET:
        if check_for_language(request.GET['language']):
            locale = to_locale(request.GET['language'])

    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, six.string_types):
        packages = packages.split('+')

    catalog, plural = get_javascript_catalog(locale, domain, packages)
    return render_javascript_catalog(catalog, plural)
| mit |
marbindrakon/eve-wspace | evewspace/Alerts/models.py | 10 | 3002 | # Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.contrib.auth.models import Group
from django.conf import settings
# Create your models here.
User = settings.AUTH_USER_MODEL
class SubscriptionGroup(models.Model):
    """Contains the definition for alert broadcast groups."""
    name = models.CharField(max_length=64, unique=True)
    desc = models.CharField(max_length=200)
    # A special alert group is one that cannot be individually joined or left.
    special = models.BooleanField(default=False)
    members = models.ManyToManyField(User, through='Subscription')

    class Meta:
        permissions = (("can_alert", "Use the alerts system."),
                ("alert_admin", "Modify alert groups and rosters."),
                ("can_ping_special", "Ping alert groups tagged special."),
                )

    def __unicode__(self):
        return self.name

    def get_user_perms(self, user):
        """
        Returns a tuple of permissions for the subscription group as such:
        (can_broadcast, can_join)
        A user's highest permissions in both are returned and special groups
        will always return can_join = False.
        """
        if self.special:
            # NOTE(review): contrary to the docstring, can_join here mirrors
            # the can_ping_special permission rather than being always False
            # -- confirm which behavior is intended.
            user_perm = user.has_perm("Alerts.can_ping_special")
            return (user_perm, user_perm)
        can_join = False
        can_broadcast = False
        # OR together the permissions granted via every auth group the user
        # belongs to: any one grant is sufficient.
        for group in user.groups.all():
            if self.group_permissions.filter(user_group=group).exists():
                perm = self.group_permissions.get(user_group=group)
                if perm.can_broadcast:
                    can_broadcast = True
                if perm.can_join:
                    can_join = True
        return (can_broadcast, can_join)
class Subscription(models.Model):
    """Mapping table that relates Users to their subscribed SubscriptionGroups."""
    group = models.ForeignKey(SubscriptionGroup)
    user = models.ForeignKey(User, related_name="alert_groups")
class SubscriptionGroupPermission(models.Model):
    """Mapping table that relates auth Groups to their permissions for SubscriptionGroups."""
    user_group = models.ForeignKey(Group, related_name="alert_groups")
    sub_group = models.ForeignKey(SubscriptionGroup, related_name="group_permissions")
    # Whether members of user_group may broadcast to / join sub_group.
    can_broadcast = models.BooleanField(default=False)
    can_join = models.BooleanField(default=False)
| apache-2.0 |
tracierenea/gnuradio | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
    """GNU Radio flowgraph: complex Gaussian noise source -> head block ->
    decimating complex FIR low-pass filter, with vector sinks capturing
    both the filter input and its output for later plotting."""

    def __init__(self, N, fs, bw, tw, atten, D):
        gr.top_block.__init__(self)

        self._nsamps = N     # number of samples to process
        self._fs = fs        # sample rate (Hz)
        self._bw = bw        # filter bandwidth (Hz)
        self._tw = tw        # transition band width (Hz)
        self._at = atten     # stopband attenuation (dB)
        self._decim = D      # decimation factor

        # Design the low-pass taps from the requested specs (gain 1).
        taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
        print "Num. Taps: ", len(taps)

        self.src  = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
        self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)

        self.filt0 = filter.fir_filter_ccc(self._decim, taps)

        self.vsnk_src = blocks.vector_sink_c()
        self.vsnk_out = blocks.vector_sink_c()

        # vsnk_src records the (truncated) source; vsnk_out the filtered stream.
        self.connect(self.src, self.head, self.vsnk_src)
        self.connect(self.head, self.filt0, self.vsnk_out)
def main():
    """Parse the command line, run the flowgraph, and plot the results.

    Plots the PSD of the source vs. the filtered output (figure 1) and
    the time-domain signals (figure 2).  Blocks in ``pylab.show()``.
    """
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Number of samples to process [default=%default]")
    parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
                      help="System sample rate [default=%default]")
    parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
                      help="Filter bandwidth [default=%default]")
    parser.add_option("-T", "--transition", type="eng_float", default=100,
                      help="Transition band [default=%default]")
    parser.add_option("-A", "--attenuation", type="eng_float", default=80,
                      help="Stopband attenuation [default=%default]")
    # NOTE: fixed typo in the help string ("Decmation" -> "Decimation").
    parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decimation factor [default=%default]")
    (options, args) = parser.parse_args()

    put = example_fir_filter_ccc(options.nsamples,
                                 options.samplerate,
                                 options.bandwidth,
                                 options.transition,
                                 options.attenuation,
                                 options.decimation)
    put.run()

    data_src = scipy.array(put.vsnk_src.data())
    data_snk = scipy.array(put.vsnk_out.data())

    # Plot the signals PSDs
    nfft = 1024
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(1,1,1)
    s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
           Fs=options.samplerate)
    s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
           Fs=options.samplerate)

    # Time-domain view: complex source vs. real part of the output.
    f2 = pylab.figure(2, figsize=(12,10))
    s2 = f2.add_subplot(1,1,1)
    s2.plot(data_src)
    s2.plot(data_snk.real, 'g')

    pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C without a traceback.
        pass
| gpl-3.0 |
maryklayne/Funcao | sympy/ntheory/residue_ntheory.py | 16 | 23167 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from sympy.core.singleton import S
from sympy.core.numbers import igcd, igcdex
from sympy.core.compatibility import as_int, xrange
from sympy.core.function import Function
from .primetest import isprime
from .factor_ import factorint, trailing, totient
from random import randint
def n_order(a, n):
    """Return the multiplicative order of ``a`` modulo ``n``.

    The order of ``a`` modulo ``n`` is the least positive integer ``k``
    such that ``a**k == 1 (mod n)``.

    Examples
    ========

    >>> from sympy.ntheory import n_order
    >>> n_order(3, 7)
    6
    >>> n_order(4, 7)
    3
    """
    from collections import defaultdict
    a, n = as_int(a), as_int(n)
    if igcd(a, n) != 1:
        raise ValueError("The two numbers should be relatively prime")
    # Build the prime factorization of a multiple of the group order of
    # (Z/nZ)*: each prime power px**kx dividing n contributes
    # px**(kx - 1) together with the factors of px - 1.
    prime_exp = defaultdict(int)
    for px, kx in factorint(n).items():
        if kx > 1:
            prime_exp[px] += kx - 1
        for py, ky in factorint(px - 1).items():
            prime_exp[py] += ky
    group_order = 1
    for px, kx in prime_exp.items():
        group_order *= px**kx
    if a > n:
        a = a % n
    # For each prime p of the group order, strip as many factors of p
    # from the exponent as possible while a**exponent stays 1 (mod n);
    # the removed deficit multiplies into the order.
    order = 1
    for p, e in prime_exp.items():
        exponent = group_order
        for i in xrange(e + 1):
            if pow(a, exponent, n) != 1:
                order *= p ** (e - i + 1)
                break
            exponent = exponent // p
    return order
def _primitive_root_prime_iter(p):
    """
    Generates the primitive roots for a prime ``p`` in increasing order.

    References
    ==========

    [1] W. Stein "Elementary Number Theory" (2011), page 44

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import _primitive_root_prime_iter
    >>> list(_primitive_root_prime_iter(19))
    [2, 3, 10, 13, 14, 15]
    """
    p = as_int(p)
    # a is a primitive root of p iff a**((p-1)/q) != 1 (mod p) for every
    # prime q dividing p - 1.
    exponents = [(p - 1) // q for q in factorint(p - 1).keys()]
    for a in xrange(2, p):
        if all(pow(a, pw, p) != 1 for pw in exponents):
            yield a
def primitive_root(p):
    """
    Returns the smallest primitive root or None

    References
    ==========

    [1] W. Stein "Elementary Number Theory" (2011), page 44
    [2] P. Hackman "Elementary Number Theory" (2009), Chapter C

    Parameters
    ==========

    p : positive integer

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import primitive_root
    >>> primitive_root(19)
    2
    """
    p = as_int(p)
    if p < 1:
        raise ValueError('p is required to be positive')
    if p <= 2:
        return 1
    f = factorint(p)
    # A primitive root exists only for moduli of the form
    # 1, 2, 4, q**k or 2*q**k with q an odd prime, so more than two
    # prime factors rules it out immediately.
    if len(f) > 2:
        return None
    if len(f) == 2:
        if 2 not in f or f[2] > 1:
            return None

        # case p = 2*p1**k, p1 prime
        for p1, e1 in f.items():
            if p1 != 2:
                break
        # brute-force search over odd candidates coprime to p1
        i = 1
        while i < p:
            i += 2
            if i % p1 == 0:
                continue
            if is_primitive_root(i, p):
                return i

    else:
        if 2 in f:
            if p == 4:
                return 3
            # p = 2**k with k >= 3 has no primitive root
            return None
        p1, n = list(f.items())[0]
        if n > 1:
            # p = p1**n, odd prime p1, n > 1:
            # see Ref [2], page 81
            g = primitive_root(p1)
            if is_primitive_root(g, p1**2):
                return g
            else:
                # a primitive root of p1**2 exists within [2, g + p1]
                for i in xrange(2, g + p1 + 1):
                    if igcd(i, p) == 1 and is_primitive_root(i, p):
                        return i
    # p is an odd prime: return the smallest primitive root directly
    return next(_primitive_root_prime_iter(p))
def is_primitive_root(a, p):
    """Return True iff ``a`` is a primitive root modulo ``p``.

    ``a`` is a primitive root of ``p`` when gcd(a, p) == 1 and the order
    of ``a`` modulo ``p`` equals ``totient(p)``.

    Examples
    ========

    >>> from sympy.ntheory import is_primitive_root, n_order, totient
    >>> is_primitive_root(3, 10)
    True
    >>> is_primitive_root(9, 10)
    False
    >>> n_order(3, 10) == totient(10)
    True
    >>> n_order(9, 10) == totient(10)
    False
    """
    a, p = as_int(a), as_int(p)
    if igcd(a, p) != 1:
        raise ValueError("The two numbers should be relatively prime")
    if a > p:
        a %= p
    return n_order(a, p) == totient(p)
def _sqrt_mod_tonelli_shanks(a, p):
    """
    Returns the square root in the case of ``p`` prime with ``p == 1 (mod 8)``

    References
    ==========

    R. Crandall and C. Pomerance "Prime Numbers", 2nt Ed., page 101
    """
    # write p - 1 = t * 2**s with t odd (p >> s == t since the s low
    # bits of p - 1 are zero)
    s = trailing(p - 1)
    t = p >> s
    # find a non-quadratic residue
    while 1:
        d = randint(2, p - 1)
        r = legendre_symbol(d, p)
        if r == -1:
            break
    #assert legendre_symbol(d, p) == -1
    A = pow(a, t, p)
    D = pow(d, t, p)
    # build m bit by bit so that A * D**m becomes 1 (mod p)
    m = 0
    for i in xrange(s):
        adm = A*pow(D, m, p) % p
        adm = pow(adm, 2**(s - 1 - i), p)
        if adm % p == p - 1:
            m += 2**i
    #assert A*pow(D, m, p) % p == 1
    # the root is a**((t+1)/2) * D**(m/2); m is even by construction
    x = pow(a, (t + 1)//2, p)*pow(D, m//2, p) % p
    return x
def sqrt_mod(a, p, all_roots=False):
    """
    find a root of ``x**2 = a mod p``

    Parameters
    ==========

    a : integer
    p : positive integer
    all_roots : if True the list of roots is returned or None

    Notes
    =====

    If there is no root it is returned None; else the returned root
    is less or equal to ``p // 2``; in general is not the smallest one.
    It is returned ``p // 2`` only if it is the only root.

    Use ``all_roots`` only when it is expected that all the roots fit
    in memory; otherwise use ``sqrt_mod_iter``.

    Examples
    ========

    >>> from sympy.ntheory import sqrt_mod
    >>> sqrt_mod(11, 43)
    21
    >>> sqrt_mod(17, 32, True)
    [7, 9, 23, 25]
    """
    if all_roots:
        return sorted(list(sqrt_mod_iter(a, p)))
    try:
        p = abs(as_int(p))
        it = sqrt_mod_iter(a, p)
        r = next(it)
        if r > p // 2:
            # prefer the symmetric root p - r, which is < p // 2
            return p - r
        elif r < p // 2:
            return r
        else:
            # r == p // 2: return it only if it is the unique root,
            # otherwise fold the second root into the lower half
            try:
                r = next(it)
                if r > p // 2:
                    return p - r
            except StopIteration:
                pass
            return r
    except StopIteration:
        # the iterator produced nothing: no root exists
        return None
def _product(*iters):
    """
    cartesian product generator

    Notes
    =====

    Unlike itertools.product, it works also with iterables which do not fit
    in memory. See http://bugs.python.org/issue10109

    Author: Fernando Sumudu
    with small changes

    NOTE(review): yields the same mutable list object on every step;
    callers must consume/copy it before advancing the generator.
    """
    import itertools
    # cycle each iterable forever, paired with its element index so a
    # wrap-around (index returning to 0) can be detected
    inf_iters = tuple(itertools.cycle(enumerate(it)) for it in iters)
    num_iters = len(inf_iters)
    cur_val = [None]*num_iters
    first_v = True
    while True:
        # odometer-style advance: step the last iterator and carry into
        # the previous one each time an iterator wraps (i == 0)
        i, p = 0, num_iters
        while p and not i:
            p -= 1
            i, cur_val[p] = next(inf_iters[p])
        # every iterator wrapped simultaneously: one full cycle is done
        if not p and not i:
            if first_v:
                first_v = False
            else:
                break
        yield cur_val
def sqrt_mod_iter(a, p, domain=int):
    """
    iterate over solutions to ``x**2 = a mod p``

    Parameters
    ==========

    a : integer
    p : positive integer
    domain : integer domain, ``int``, ``ZZ`` or ``Integer``

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import sqrt_mod_iter
    >>> list(sqrt_mod_iter(11, 43))
    [21, 22]
    """
    from sympy.polys.galoistools import gf_crt, gf_crt1, gf_crt2
    from sympy.polys.domains import ZZ
    a, p = as_int(a), abs(as_int(p))
    if isprime(p):
        a = a % p
        if a == 0:
            res = _sqrt_mod1(a, p, 1)
        else:
            res = _sqrt_mod_prime_power(a, p, 1)
        if res:
            if domain is ZZ:
                for x in res:
                    yield x
            else:
                for x in res:
                    yield domain(x)
    else:
        # composite modulus: find the roots modulo each prime power and
        # combine them with the Chinese Remainder Theorem
        f = factorint(p)
        v = []
        pv = []
        for px, ex in f.items():
            if a % px == 0:
                rx = _sqrt_mod1(a, px, ex)
                if not rx:
                    # no root modulo this prime power => no root at all.
                    # Use ``return`` (not ``raise StopIteration``): they
                    # are equivalent inside a generator before PEP 479,
                    # and only ``return`` keeps working on Python 3.7+.
                    return
            else:
                rx = _sqrt_mod_prime_power(a, px, ex)
                if not rx:
                    return
            v.append(rx)
            pv.append(px**ex)
        mm, e, s = gf_crt1(pv, ZZ)
        if domain is ZZ:
            for vx in _product(*v):
                r = gf_crt2(vx, pv, mm, e, s, ZZ)
                yield r
        else:
            for vx in _product(*v):
                r = gf_crt2(vx, pv, mm, e, s, ZZ)
                yield domain(r)
def _sqrt_mod_prime_power(a, p, k):
    """
    find the solutions to ``x**2 = a mod p**k`` when ``a % p != 0``

    Parameters
    ==========

    a : integer
    p : prime number
    k : positive integer

    References
    ==========

    [1] P. Hackman "Elementary Number Theory" (2009), page 160
    [2] http://www.numbertheory.org/php/squareroot.html
    [3] [Gathen99]_

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import _sqrt_mod_prime_power
    >>> _sqrt_mod_prime_power(11, 43, 1)
    [21, 22]
    """
    from sympy.core.numbers import igcdex
    from sympy.polys.domains import ZZ

    pk = p**k
    a = a % pk

    if k == 1:
        if p == 2:
            # mod 2 every residue is its own square root
            return [ZZ(a)]
        if not is_quad_residue(a, p):
            return None

        if p % 4 == 3:
            # direct formula for p = 3 (mod 4)
            res = pow(a, (p + 1) // 4, p)
        elif p % 8 == 5:
            # Atkin-style formulas for p = 5 (mod 8)
            sign = pow(a, (p - 1) // 4, p)
            if sign == 1:
                res = pow(a, (p + 3) // 8, p)
            else:
                b = pow(4*a, (p - 5) // 8, p)
                x = (2*a*b) % p
                if pow(x, 2, p) == a:
                    res = x
        else:
            # general case p = 1 (mod 8)
            res = _sqrt_mod_tonelli_shanks(a, p)

        # ``_sqrt_mod_tonelli_shanks(a, p)`` is not deterministic;
        # sort to get always the same result
        return sorted([ZZ(res), ZZ(p - res)])

    if k > 1:
        # NOTE: a dead ``f = factorint(a)`` (an expensive, unused
        # factorization) was removed here.
        # see Ref.[2]
        if p == 2:
            if a % 8 != 1:
                return None
            if k <= 3:
                s = set()
                for i in xrange(0, pk, 4):
                    s.add(1 + i)
                    s.add(-1 + i)
                return list(s)
            # according to Ref.[2] for k > 2 there are two solutions
            # (mod 2**k-1), that is four solutions (mod 2**k), which can be
            # obtained from the roots of x**2 = 0 (mod 8)
            rv = [ZZ(1), ZZ(3), ZZ(5), ZZ(7)]
            # hensel lift them to solutions of x**2 = 0 (mod 2**k)
            # if r**2 - a = 0 mod 2**nx but not mod 2**(nx+1)
            # then r + 2**(nx - 1) is a root mod 2**(nx+1)
            n = 3
            res = []
            for r in rv:
                nx = n
                while nx < k:
                    r1 = (r**2 - a) >> nx
                    if r1 % 2:
                        r = r + (1 << (nx - 1))
                    #assert (r**2 - a)% (1 << (nx + 1)) == 0
                    nx += 1
                if r not in res:
                    res.append(r)
                x = r + (1 << (k - 1))
                #assert (x**2 - a) % pk == 0
                if x < (1 << nx) and x not in res:
                    if (x**2 - a) % pk == 0:
                        res.append(x)
            return res
        rv = _sqrt_mod_prime_power(a, p, 1)
        if not rv:
            return None
        r = rv[0]
        fr = r**2 - a
        # hensel lifting with Newton iteration, see Ref.[3] chapter 9
        # with f(x) = x**2 - a; one has f'(a) != 0 (mod p) for p != 2
        n = 1
        px = p
        while 1:
            # double the precision until p**n reaches or exceeds p**k
            n1 = n
            n1 *= 2
            if n1 > k:
                break
            n = n1
            px = px**2
            frinv = igcdex(2*r, px)[0]
            r = (r - fr*frinv) % px
            fr = r**2 - a
        if n < k:
            # one final correction step at full precision p**k
            px = p**k
            frinv = igcdex(2*r, px)[0]
            r = (r - fr*frinv) % px
        return [r, px - r]
def _sqrt_mod1(a, p, n):
    """
    find solution to ``x**2 == a mod p**n`` when ``a % p == 0``

    see http://www.numbertheory.org/php/squareroot.html

    Returns an iterator over all roots, or None if there is no root.
    """
    pn = p**n
    a = a % pn
    if a == 0:
        # case gcd(a, p**k) = p**n
        # every multiple of p**ceil(n/2) squares to 0 mod p**n
        m = n // 2
        if n % 2 == 1:
            pm1 = p**(m + 1)
            def _iter0a():
                i = 0
                while i < pn:
                    yield i
                    i += pm1
            return _iter0a()
        else:
            pm = p**m
            def _iter0b():
                i = 0
                while i < pn:
                    yield i
                    i += pm
            return _iter0b()

    # case gcd(a, p**k) = p**r, r < n
    f = factorint(a)
    r = f[p]
    if r % 2 == 1:
        # an odd power of p can never be a square
        return None
    m = r // 2
    # NOTE(review): ``a >> r`` equals ``a // p**r`` only for p == 2; the
    # odd-p branch below recomputes a1 correctly before using it.
    a1 = a >> r
    if p == 2:
        if n - r == 1:
            pnm1 = 1 << (n - m + 1)
            pm1 = 1 << (m + 1)
            def _iter1():
                k = 1 << (m + 2)
                i = 1 << m
                while i < pnm1:
                    j = i
                    while j < pn:
                        yield j
                        j += k
                    i += pm1
            return _iter1()
        if n - r == 2:
            # lift the roots of the odd part a1 and scale by 2**m,
            # de-duplicating with a seen-set
            res = _sqrt_mod_prime_power(a1, p, n - r)
            if res is None:
                return None
            pnm = 1 << (n - m)
            def _iter2():
                s = set()
                for r in res:
                    i = 0
                    while i < pn:
                        x = (r << m) + i
                        if x not in s:
                            s.add(x)
                            yield x
                        i += pnm
            return _iter2()
        if n - r > 2:
            res = _sqrt_mod_prime_power(a1, p, n - r)
            if res is None:
                return None
            pnm1 = 1 << (n - m - 1)
            def _iter3():
                s = set()
                for r in res:
                    i = 0
                    while i < pn:
                        x = ((r << m) + i) % pn
                        if x not in s:
                            s.add(x)
                            yield x
                        i += pnm1
            return _iter3()
    else:
        # odd prime p: x = p**m * y with y a root of a1 mod p**(n-r)
        m = r // 2
        a1 = a // p**r
        res1 = _sqrt_mod_prime_power(a1, p, n - r)
        if res1 is None:
            return None
        pm = p**m
        pnr = p**(n-r)
        pnm = p**(n-m)

        def _iter4():
            s = set()
            pm = p**m
            for rx in res1:
                i = 0
                while i < pnm:
                    x = ((rx + i) % pn)
                    if x not in s:
                        s.add(x)
                        yield x*pm
                    i += pnr
        return _iter4()
def is_quad_residue(a, p):
    """
    Returns True if ``a`` (mod ``p``) is in the set of squares mod ``p``,
    i.e a % p in set([i**2 % p for i in range(p)]). If ``p`` is an odd
    prime, an iterative method is used to make the determination:

    >>> from sympy.ntheory import is_quad_residue
    >>> list(set([i**2 % 7 for i in range(7)]))
    [0, 1, 2, 4]
    >>> [j for j in range(7) if is_quad_residue(j, 7)]
    [0, 1, 2, 4]

    See Also
    ========

    legendre_symbol, jacobi_symbol
    """
    a, p = as_int(a), as_int(p)
    if p < 1:
        raise ValueError('p must be > 0')
    if not 0 <= a < p:
        a %= p
    # 0 and 1 are always squares; every residue is a square mod 1 and 2
    if a < 2 or p < 3:
        return True
    if isprime(p):
        # Euler's criterion for an odd prime modulus
        return pow(a, (p - 1) // 2, p) == 1
    # composite modulus: a Jacobi symbol of -1 rules out a square for
    # odd p; otherwise fall back to an explicit square-root search
    if p % 2 and jacobi_symbol(a, p) == -1:
        return False
    return sqrt_mod(a, p) is not None
def is_nthpow_residue(a, n, m):
    """
    Returns True if ``x**n == a (mod m)`` has solutions.

    References
    ==========

    P. Hackman "Elementary Number Theory" (2009), page 76
    """
    if n == 1:
        # x = a is always a solution
        return True
    a = a % m
    if a == 0:
        # x = 0 solves x**n == 0 (mod m); the Euler-criterion style test
        # below is only valid when gcd(a, m) == 1, and would wrongly
        # report False here.
        return True
    if n == 2:
        return is_quad_residue(a, m)
    # a is an n-th power residue iff a**(phi(m)/gcd(phi(m), n)) == 1
    f = totient(m)
    k = f // igcd(f, n)
    return pow(a, k, m) == 1
def _nthroot_mod2(s, q, p):
    # Reduce a composite-order root to a chain of prime-order roots:
    # if q = q1 * q2 * ... * qk (with multiplicity), take successive
    # qi-th roots of s modulo p.
    for prime, mult in factorint(q).items():
        for _ in range(mult):
            s = _nthroot_mod1(s, prime, p, False)
    return s
def _nthroot_mod1(s, q, p, all_roots):
    """
    Root of ``x**q = s mod p``, ``p`` prime and ``q`` divides ``p - 1``

    If ``all_roots`` is True, return the sorted list of all roots;
    otherwise return the smallest root.

    References
    ==========

    [1] A. M. Johnston "A Generalized qth Root Algorithm"
    """
    g = primitive_root(p)
    if not isprime(q):
        # composite q: peel off one prime-order root at a time
        r = _nthroot_mod2(s, q, p)
    else:
        f = p - 1
        assert (p - 1) % q == 0
        # determine k: the exact power of q dividing p - 1
        k = 0
        while f % q == 0:
            k += 1
            f = f // q
        # find z, x, r1
        f1 = igcdex(-f, q)[0] % q
        z = f*f1
        x = (1 + z) // q
        r1 = pow(s, x, p)
        s1 = pow(s, f, p)
        h = pow(g, f*q, p)
        # NOTE: removed two unused locals (powers of g that were
        # computed but never read).
        # find t discrete log of s1 base h, h**x = s1 mod p
        # used a naive implementation
        # TODO implement using Ref [1]
        pr = 1
        for t in xrange(p):
            if pr == s1:
                break
            pr = pr*h % p
        g2 = pow(g, z*t, p)
        g3 = igcdex(g2, p)[0]
        r = r1*g3 % p
        #assert pow(r, q, p) == s
    res = [r]
    # the remaining roots are r times the q-th roots of unity g**((p-1)/q)
    h = pow(g, (p - 1) // q, p)
    #assert pow(h, q, p) == 1
    hx = r
    for i in range(q - 1):
        hx = (hx*h) % p
        res.append(hx)
    if all_roots:
        res.sort()
        return res
    return min(res)
def nthroot_mod(a, n, p, all_roots=False):
    """
    find the solutions to ``x**n = a mod p``

    Parameters
    ==========

    a : integer
    n : positive integer
    p : positive integer
    all_roots : if False returns the smallest root, else the list of roots

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import nthroot_mod
    >>> nthroot_mod(11, 4, 19)
    8
    >>> nthroot_mod(11, 4, 19, True)
    [8, 11]
    >>> nthroot_mod(68, 3, 109)
    23
    """
    from sympy.core.numbers import igcdex
    if n == 2:
        return sqrt_mod(a, p , all_roots)
    # see Hackman "Elementary Number Theory" (2009), page 76
    f = totient(p)
    if pow(a, f // igcd(f, n), p) != 1:
        # a is not an n-th power residue: no solutions
        return None
    if not isprime(p):
        raise NotImplementedError
    if (p - 1) % n == 0:
        return _nthroot_mod1(a, n, p, all_roots)
    # The roots of ``x**n - a = 0 (mod p)`` are roots of
    # ``gcd(x**n - a, x**(p - 1) - 1) = 0 (mod p)``
    # Euclid-style reduction on exponents: the pair (pa, a) stands for
    # the binomial x**pa - a, (pb, b) for x**pb - b.
    pa = n
    pb = p - 1
    b = 1
    if pa < pb:
        a, pa, b, pb = b, pb, a, pa
    while pb:
        # x**pa - a = 0; x**pb - b = 0
        # x**pa - a = x**(q*pb + r) - a = (x**pb)**q * x**r - a =
        # b**q * x**r - a; x**r - c = 0; c = b**-q * a mod p
        q, r = divmod(pa, pb)
        c = pow(b, q, p)
        c = igcdex(c, p)[0]
        c = (c * a) % p
        pa, pb = pb, r
        a, b = b, c
    # gcd reached degree pa: solve the remaining small-degree binomial
    if pa == 1:
        if all_roots:
            res = [a]
        else:
            res = a
    elif pa == 2:
        return sqrt_mod(a, p , all_roots)
    else:
        res = _nthroot_mod1(a, pa, p, all_roots)
    return res
def quadratic_residues(p):
    """
    Returns the list of quadratic residues.

    Examples
    ========

    >>> from sympy.ntheory.residue_ntheory import quadratic_residues
    >>> quadratic_residues(7)
    [0, 1, 2, 4]
    """
    # i**2 and (p - i)**2 coincide mod p, so squaring 0..p//2 already
    # covers every residue; the set removes duplicates.
    return sorted(set(pow(i, 2, p) for i in range(p // 2 + 1)))
def legendre_symbol(a, p):
    """
    Returns the Legendre symbol (a / p) for an odd prime ``p``:

    1. 0 if a is multiple of p
    2. 1 if a is a quadratic residue of p
    3. -1 otherwise

    Examples
    ========

    >>> from sympy.ntheory import legendre_symbol
    >>> [legendre_symbol(i, 7) for i in range(7)]
    [0, 1, 1, -1, 1, -1, -1]
    >>> list(set([i**2 % 7 for i in range(7)]))
    [0, 1, 2, 4]

    See Also
    ========

    is_quad_residue, jacobi_symbol
    """
    a, p = as_int(a), as_int(p)
    if p == 2 or not isprime(p):
        raise ValueError("p should be an odd prime")
    a %= p
    if not a:
        return 0
    return 1 if is_quad_residue(a, p) else -1
def jacobi_symbol(m, n):
    """
    Returns the product of the legendre_symbol(m, p)
    for all the prime factors, p, of n.

    Returns
    =======

    1. 0 if m cong 0 mod(n)
    2. 1 if x**2 cong m mod(n) has a solution
    3. -1 otherwise

    (Note that for composite ``n`` a value of 1 does not by itself
    guarantee that ``x**2 cong m mod(n)`` is solvable.)

    Examples
    ========

    >>> from sympy.ntheory import jacobi_symbol, legendre_symbol
    >>> from sympy import Mul, S
    >>> jacobi_symbol(45, 77)
    -1
    >>> jacobi_symbol(60, 121)
    1

    The relationship between the jacobi_symbol and legendre_symbol can
    be demonstrated as follows:

    >>> L = legendre_symbol
    >>> S(45).factors()
    {3: 2, 5: 1}
    >>> jacobi_symbol(7, 45) == L(7, 3)**2 * L(7, 5)**1
    True

    See Also
    ========

    is_quad_residue, legendre_symbol
    """
    m, n = as_int(m), as_int(n)
    if not n % 2:
        raise ValueError("n should be an odd integer")
    if m < 0 or m > n:
        m = m % n
    if not m:
        return int(n == 1)
    if n == 1 or m == 1:
        return 1
    if igcd(m, n) != 1:
        return 0

    j = 1
    # strip factors of 2 from m; each odd count of stripped twos flips
    # the sign when n % 8 in {3, 5} (second supplementary law)
    s = trailing(m)
    m = m >> s
    if s % 2 and n % 8 in [3, 5]:
        j *= -1

    while m != 1:
        # quadratic reciprocity: swapping (m, n) flips the sign exactly
        # when both are congruent to 3 mod 4
        if m % 4 == 3 and n % 4 == 3:
            j *= -1
        m, n = n % m, m
        s = trailing(m)
        m = m >> s
        if s % 2 and n % 8 in [3, 5]:
            j *= -1
    return j
class mobius(Function):
    """
    Möbius function maps natural number to {-1, 0, 1}

    It is defined as follows:
        1) `1` if `n = 1`.
        2) `0` if `n` has a squared prime factor.
        3) `(-1)^k` if `n` is a square-free positive integer with `k`
           number of prime factors.

    It is an important multiplicative function in number theory
    and combinatorics.  It has applications in mathematical series,
    algebraic number theory and also physics (Fermion operator has very
    concrete realization with Möbius Function model).

    Parameters
    ==========

    n : positive integer

    Examples
    ========

    >>> from sympy.ntheory import mobius
    >>> mobius(13*7)
    1
    >>> mobius(1)
    1
    >>> mobius(13*7*5)
    -1
    >>> mobius(13**2)
    0

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/M%C3%B6bius_function
    .. [2] Thomas Koshy "Elementary Number Theory with Applications"

    """
    @classmethod
    def eval(cls, n):
        # Guard clauses: reject non-integers, then anything not known
        # to be positive.
        if not n.is_integer:
            raise TypeError("n should be an integer")
        if n.is_positive is not True:
            raise ValueError("n should be a positive integer")
        if n.is_prime:
            # a prime has exactly one prime factor: (-1)**1
            return S.NegativeOne
        if n is S.One:
            return S.One
        if n.is_Integer:
            exponents = factorint(n)
            if any(e > 1 for e in exponents.values()):
                return S.Zero
            return S.NegativeOne**len(exponents)
| bsd-3-clause |
Khan/pygments | pygments/lexers/compiled.py | 6 | 130406 | # -*- coding: utf-8 -*-
"""
pygments.lexers.compiled
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for compiled languages.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from string import Template
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, combined
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Literal
from pygments.scanner import Scanner
# backwards compatibility
from pygments.lexers.functional import OcamlLexer
from pygments.lexers.jvm import JavaLexer, ScalaLexer
__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'ECLexer',
'DylanLexer', 'ObjectiveCLexer', 'FortranLexer', 'GLShaderLexer',
'PrologLexer', 'CythonLexer', 'ValaLexer', 'OocLexer', 'GoLexer',
'FelixLexer', 'AdaLexer', 'Modula2Lexer', 'BlitzMaxLexer',
'NimrodLexer', 'FantomLexer', 'RustLexer', 'CUDALexer']
class CLexer(RegexLexer):
    """
    For C source code with preprocessor directives.
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc']
    mimetypes = ['text/x-chdr', 'text/x-csrc']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r':\s*/[*].*?[*]/\s*'

    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            # goto labels (``name:`` but not ``::``)
            (r'^(\s*)([a-zA-Z_][a-zA-Z0-9_]*:(?!:))',
             bygroups(Text, Name.Label)),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # a comment terminator outside a comment is an error
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            (r'(auto|break|case|const|continue|default|do|else|enum|extern|'
             r'for|goto|if|register|restricted|return|sizeof|static|struct|'
             r'switch|typedef|union|volatile|virtual|while)\b', Keyword),
            (r'(int|long|float|short|double|char|unsigned|signed|void)\b',
             Keyword.Type),
            (r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
            # MSVC-specific reserved words
            (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
             r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
            (r'(true|false|NULL)\b', Name.Builtin),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z_][a-zA-Z0-9_]*)'          # method name
             r'(\s*\([^;]*?\))'                   # signature
             r'(' + _ws + r')?({)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z_][a-zA-Z0-9_]*)'          # method name
             r'(\s*\([^;]*?\))'                   # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            # anything else is handled statement by statement
            ('', Text, 'statement'),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            # track nested braces to find the end of the function body
            ('{', Punctuation, '#push'),
            ('}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # nested #if blocks push; #else/#elif/#endif pop
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    # typedef names from the standard C library headers
    stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
                    'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
                    'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
                    'wctrans_t', 'wint_t', 'wctype_t']
    # type names introduced by C99 (stdint.h and friends)
    c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
                 'int_least16_t', 'int_least32_t', 'int_least64_t',
                 'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
                 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
                 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
                 'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t']

    def __init__(self, **options):
        # Both options default to True and control whether the typedef
        # names listed above are highlighted as Keyword.Type.
        self.stdlibhighlighting = get_bool_opt(options,
            'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options,
            'c99highlighting', True)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Post-process the regex token stream: promote plain Name tokens
        # that match a known stdlib/C99 typedef to Keyword.Type.
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
            yield index, token, value
class CppLexer(RegexLexer):
    """
    For C++ source code with preprocessor directives.
    """
    name = 'C++'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r':\s*/[*].*?[*]/\s*'

    # Unlike CLexer, all statement-level lexing happens in one 'root'
    # state; there is no separate function/statement state pair.
    tokens = {
        'root': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # a comment terminator outside a comment is an error
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;]', Punctuation),
            (r'(asm|auto|break|case|catch|const|const_cast|continue|'
             r'default|delete|do|dynamic_cast|else|enum|explicit|export|'
             r'extern|for|friend|goto|if|mutable|namespace|new|operator|'
             r'private|protected|public|register|reinterpret_cast|return|'
             r'restrict|sizeof|static|static_cast|struct|switch|template|'
             r'this|throw|throws|try|typedef|typeid|typename|union|using|'
             r'volatile|virtual|while)\b', Keyword),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(bool|int|long|float|short|double|char|unsigned|signed|'
             r'void|wchar_t)\b', Keyword.Type),
            (r'(_{0,2}inline|naked|thread)\b', Keyword.Reserved),
            # MSVC-specific reserved words
            (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
             r'declspec|finally|int64|try|leave|wchar_t|w64|virtual_inheritance|'
             r'uuidof|unaligned|super|single_inheritance|raise|noop|'
             r'multiple_inheritance|m128i|m128d|m128|m64|interface|'
             r'identifier|forceinline|event|assume)\b', Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'(__offload|__blockingoffload|__outer)\b', Keyword.Pseudo),
            (r'(true|false)\b', Keyword.Constant),
            (r'NULL\b', Name.Builtin),
            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # nested #if blocks push; #endif pops
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
class ECLexer(RegexLexer):
    """
    For eC source code with preprocessor directives.

    eC is a C superset (http://ec-lang.org); the state machine mirrors
    CLexer with additional eC keywords and builtin types.

    *New in Pygments 1.5.*
    """
    name = 'eC'
    aliases = ['ec']
    filenames = ['*.ec', '*.eh']
    mimetypes = ['text/x-echdr', 'text/x-ecsrc']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r':\s*/[*].*?[*]/\s*'

    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
            # goto labels (``name:`` but not ``::``)
            (r'^(\s*)([a-zA-Z_][a-zA-Z0-9_]*:(?!:))', bygroups(Text, Name.Label)),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # a comment terminator outside a comment is an error
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            # C keywords plus the eC language extensions
            (r'(auto|break|case|const|continue|default|do|else|enum|extern|'
             r'for|goto|if|register|restricted|return|sizeof|static|struct|'
             r'switch|typedef|union|volatile|virtual|while|class|private|public|'
             r'property|import|delete|new|new0|renew|renew0|define|get|set|remote|dllexport|dllimport|stdcall|'
             r'subclass|__on_register_module|namespace|using|typed_object|any_object|incref|register|watch|'
             r'stopwatching|firewatchers|watchable|class_designer|class_fixed|class_no_expansion|isset|'
             r'class_default_property|property_category|class_data|class_property|virtual|thisclass|'
             r'dbtable|dbindex|database_open|dbfield)\b', Keyword),
            (r'(int|long|float|short|double|char|unsigned|signed|void)\b',
             Keyword.Type),
            # additional eC builtin types
            (r'(uint|uint16|uint32|uint64|bool|byte|unichar|int64)\b',
             Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(_{0,2}inline|naked|restrict|thread|typename)\b', Keyword.Reserved),
            (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
             r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
            (r'(true|false|null|value|this|NULL)\b', Name.Builtin),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z_][a-zA-Z0-9_]*)'          # method name
             r'(\s*\([^;]*?\))'                   # signature
             r'(' + _ws + r')?({)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z_][a-zA-Z0-9_]*)'          # method name
             r'(\s*\([^;]*?\))'                   # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            # anything else is handled statement by statement
            ('', Text, 'statement'),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            # track nested braces to find the end of the function body
            ('{', Punctuation, '#push'),
            ('}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # nested #if blocks push; #else/#elif/#endif pop
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    # typedef names from the standard C library headers (same list as
    # CLexer; eC accepts the full C standard library)
    stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
                    'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
                    'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
                    'wctrans_t', 'wint_t', 'wctype_t']
    # type names introduced by C99 (stdint.h and friends)
    c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
                 'int_least16_t', 'int_least32_t', 'int_least64_t',
                 'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
                 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
                 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
                 'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t']

    def __init__(self, **options):
        # Both options default to True and control whether the typedef
        # names listed above are highlighted as Keyword.Type.
        self.stdlibhighlighting = get_bool_opt(options,
            'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options,
            'c99highlighting', True)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Post-process the regex token stream: promote plain Name tokens
        # that match a known stdlib/C99 typedef to Keyword.Type.
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
            yield index, token, value
class DLexer(RegexLexer):
    """
    For D source.

    *New in Pygments 1.2.*
    """
    name = 'D'
    filenames = ['*.d', '*.di']
    aliases = ['d']
    mimetypes = ['text/x-dsrc']
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            #(r'\\\n', Text), # line continuations
            # Comments
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'/\+', Comment.Multiline, 'nested_comment'),
            # Keywords
            (r'(abstract|alias|align|asm|assert|auto|body|break|case|cast'
             r'|catch|class|const|continue|debug|default|delegate|delete'
             r'|deprecated|do|else|enum|export|extern|finally|final'
             r'|foreach_reverse|foreach|for|function|goto|if|import|inout'
             r'|interface|invariant|in|is|lazy|mixin|module|new|nothrow|out'
             r'|override|package|pragma|private|protected|public|pure|ref|return'
             r'|scope|static|struct|super|switch|synchronized|template|this'
             r'|throw|try|typedef|typeid|typeof|union|unittest|version|volatile'
             r'|while|with|__traits)\b', Keyword
            ),
            (r'(bool|byte|cdouble|cent|cfloat|char|creal|dchar|double|float'
             r'|idouble|ifloat|int|ireal|long|real|short|ubyte|ucent|uint|ulong'
             r'|ushort|void|wchar)\b', Keyword.Type
            ),
            (r'(false|true|null)\b', Keyword.Constant),
            (r'macro\b', Keyword.Reserved),
            (r'(string|wstring|dstring)\b', Name.Builtin),
            # FloatLiteral
            # -- HexFloat
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+', Number),
            # -- Octal
            (r'0[0-7_]+', Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
            # CharacterLiteral
            (r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
             String.Char
            ),
            # StringLiteral
            # -- WysiwygString
            (r'r"[^"]*"[cwd]?', String),
            # -- AlternateWysiwygString
            (r'`[^`]*`[cwd]?', String),
            # -- DoubleQuotedString
            (r'"(\\\\|\\"|[^"])*"[cwd]?', String),
            # -- EscapeSequence
            (r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
             r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
             String
            ),
            # -- HexString
            (r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
            # -- DelimitedString
            (r'q"\[', String, 'delimited_bracket'),
            (r'q"\(', String, 'delimited_parenthesis'),
            (r'q"<', String, 'delimited_angle'),
            (r'q"{', String, 'delimited_curly'),
            (r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
            (r'q"(.).*?\1"', String),
            # -- TokenString
            (r'q{', String, 'token_string'),
            # Tokens
            (r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
             r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
             r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation
            ),
            # Identifier
            (r'[a-zA-Z_]\w*', Name),
        ],
        # D's /+ ... +/ comments nest; depth is tracked via #push/#pop.
        'nested_comment': [
            (r'[^+/]+', Comment.Multiline),
            (r'/\+', Comment.Multiline, '#push'),
            (r'\+/', Comment.Multiline, '#pop'),
            (r'[+/]', Comment.Multiline),
        ],
        # q{...} token strings: the contents are lexed as ordinary D tokens,
        # with nested braces handled by the *_nest state.
        'token_string': [
            (r'{', Punctuation, 'token_string_nest'),
            (r'}', String, '#pop'),
            include('root'),
        ],
        'token_string_nest': [
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
            include('root'),
        ],
        # q"[...]" style delimited strings: each flavour has one state for the
        # outer string (terminated by the closing delimiter followed by ``"``)
        # and one for balanced nested delimiters inside it.
        'delimited_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, 'delimited_inside_bracket'),
            (r'\]"', String, '#pop'),
        ],
        'delimited_inside_bracket': [
            (r'[^\[\]]+', String),
            (r'\[', String, '#push'),
            (r'\]', String, '#pop'),
        ],
        'delimited_parenthesis': [
            (r'[^\(\)]+', String),
            (r'\(', String, 'delimited_inside_parenthesis'),
            (r'\)"', String, '#pop'),
        ],
        'delimited_inside_parenthesis': [
            (r'[^\(\)]+', String),
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],
        'delimited_angle': [
            (r'[^<>]+', String),
            (r'<', String, 'delimited_inside_angle'),
            (r'>"', String, '#pop'),
        ],
        'delimited_inside_angle': [
            (r'[^<>]+', String),
            (r'<', String, '#push'),
            (r'>', String, '#pop'),
        ],
        'delimited_curly': [
            (r'[^{}]+', String),
            (r'{', String, 'delimited_inside_curly'),
            (r'}"', String, '#pop'),
        ],
        'delimited_inside_curly': [
            (r'[^{}]+', String),
            (r'{', String, '#push'),
            (r'}', String, '#pop'),
        ],
    }
class DelphiLexer(Lexer):
    """
    For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
    Turbo Pascal and Free Pascal source code.

    Additional options accepted:

    `turbopascal`
        Highlight Turbo Pascal specific keywords (default: ``True``).
    `delphi`
        Highlight Borland Delphi specific keywords (default: ``True``).
    `freepascal`
        Highlight Free Pascal specific keywords (default: ``True``).
    `units`
        A list of units that should be considered builtin, supported are
        ``System``, ``SysUtils``, ``Classes`` and ``Math``.
        Default is to consider all of them builtin.
    """
    name = 'Delphi'
    aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
    filenames = ['*.pas']
    mimetypes = ['text/x-pascal']
    # Dialect-specific keyword sets; merged into self.keywords in __init__
    # according to the corresponding boolean option.
    TURBO_PASCAL_KEYWORDS = [
        'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
        'const', 'constructor', 'continue', 'destructor', 'div', 'do',
        'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
        'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
        'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
        'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
        'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
        'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
    ]
    DELPHI_KEYWORDS = [
        'as', 'class', 'except', 'exports', 'finalization', 'finally',
        'initialization', 'is', 'library', 'on', 'property', 'raise',
        'threadvar', 'try'
    ]
    FREE_PASCAL_KEYWORDS = [
        'dispose', 'exit', 'false', 'new', 'true'
    ]
    # Keywords that terminate a function/property header context in the
    # scanner loop below.
    BLOCK_KEYWORDS = set([
        'begin', 'class', 'const', 'constructor', 'destructor', 'end',
        'finalization', 'function', 'implementation', 'initialization',
        'label', 'library', 'operator', 'procedure', 'program', 'property',
        'record', 'threadvar', 'type', 'unit', 'uses', 'var'
    ])
    # Modifiers that may follow a routine header (highlighted Keyword.Pseudo
    # only while inside a function block).
    FUNCTION_MODIFIERS = set([
        'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
        'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
        'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
        'override', 'assembler'
    ])
    # XXX: those aren't global. but currently we know no way for defining
    # them just for the type context.
    DIRECTIVES = set([
        'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
        'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
        'published', 'public'
    ])
    BUILTIN_TYPES = set([
        'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
        'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
        'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
        'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
        'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
        'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
        'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
        'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
        'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
        'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
        'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
        'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
        'widechar', 'widestring', 'word', 'wordbool'
    ])
    # Routines grouped by unit; the `units` option selects which of these
    # are merged into self.builtins in __init__.
    BUILTIN_UNITS = {
        'System': [
            'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
            'append', 'arctan', 'assert', 'assigned', 'assignfile',
            'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
            'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
            'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
            'dispose', 'doubletocomp', 'endthread', 'enummodules',
            'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
            'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
            'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
            'findresourcehinstance', 'flush', 'frac', 'freemem',
            'get8087cw', 'getdir', 'getlasterror', 'getmem',
            'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
            'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
            'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
            'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
            'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
            'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
            'randomize', 'read', 'readln', 'reallocmem',
            'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
            'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
            'set8087cw', 'setlength', 'setlinebreakstyle',
            'setmemorymanager', 'setstring', 'settextbuf',
            'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
            'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
            'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
            'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
            'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
            'utf8tounicode', 'val', 'vararrayredim', 'varclear',
            'widecharlentostring', 'widecharlentostrvar',
            'widechartostring', 'widechartostrvar',
            'widestringtoucs4string', 'write', 'writeln'
        ],
        'SysUtils': [
            'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
            'allocmem', 'ansicomparefilename', 'ansicomparestr',
            'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
            'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
            'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
            'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
            'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
            'ansistrscan', 'ansistrupper', 'ansiuppercase',
            'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
            'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
            'callterminateprocs', 'changefileext', 'charlength',
            'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
            'comparetext', 'createdir', 'createguid', 'currentyear',
            'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
            'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
            'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
            'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
            'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
            'exceptionerrormessage', 'excludetrailingbackslash',
            'excludetrailingpathdelimiter', 'expandfilename',
            'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
            'extractfiledrive', 'extractfileext', 'extractfilename',
            'extractfilepath', 'extractrelativepath', 'extractshortpathname',
            'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
            'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
            'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
            'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
            'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
            'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
            'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
            'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
            'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
            'getenvironmentvariable', 'getfileversion', 'getformatsettings',
            'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
            'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
            'includetrailingbackslash', 'includetrailingpathdelimiter',
            'incmonth', 'initializepackage', 'interlockeddecrement',
            'interlockedexchange', 'interlockedexchangeadd',
            'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
            'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
            'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
            'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
            'outofmemoryerror', 'quotedstr', 'raiselastoserror',
            'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
            'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
            'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
            'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
            'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
            'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
            'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
            'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
            'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
            'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
            'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
            'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
            'strtotimedef', 'strupper', 'supports', 'syserrormessage',
            'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
            'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
            'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
            'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
            'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
            'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
            'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
            'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
            'wraptext'
        ],
        'Classes': [
            'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
            'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
            'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
            'groupdescendantswith', 'hextobin', 'identtoint',
            'initinheritedcomponent', 'inttoident', 'invalidpoint',
            'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
            'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
            'pointsequal', 'readcomponentres', 'readcomponentresex',
            'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
            'registerclasses', 'registercomponents', 'registerintegerconsts',
            'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
            'teststreamformat', 'unregisterclass', 'unregisterclasses',
            'unregisterintegerconsts', 'unregistermoduleclasses',
            'writecomponentresfile'
        ],
        'Math': [
            'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
            'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
            'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
            'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
            'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
            'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
            'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
            'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
            'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
            'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
            'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
            'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
            'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
            'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
            'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
            'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
            'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
            'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
            'tan', 'tanh', 'totalvariance', 'variance'
        ]
    }
    # x86 register and instruction mnemonics, used while lexing ``asm`` blocks.
    ASM_REGISTERS = set([
        'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
        'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
        'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
        'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
        'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
        'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
        'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
        'xmm6', 'xmm7'
    ])
    ASM_INSTRUCTIONS = set([
        'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
        'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
        'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
        'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
        'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
        'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
        'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
        'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
        'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
        'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
        'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
        'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
        'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
        'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
        'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
        'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
        'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
        'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
        'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
        'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
        'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
        'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
        'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
        'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
        'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
        'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
        'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
        'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
        'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
        'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
        'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
        'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
        'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
        'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
        'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
        'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
        'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
        'xlatb', 'xor'
    ])
    def __init__(self, **options):
        """Build the keyword and builtin sets from the dialect options."""
        Lexer.__init__(self, **options)
        self.keywords = set()
        if get_bool_opt(options, 'turbopascal', True):
            self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
        if get_bool_opt(options, 'delphi', True):
            self.keywords.update(self.DELPHI_KEYWORDS)
        if get_bool_opt(options, 'freepascal', True):
            self.keywords.update(self.FREE_PASCAL_KEYWORDS)
        self.builtins = set()
        for unit in get_list_opt(options, 'units', self.BUILTIN_UNITS.keys()):
            self.builtins.update(self.BUILTIN_UNITS[unit])
    def get_tokens_unprocessed(self, text):
        """Hand-written scanner (not regex-table based).
        Runs a small state machine with three states -- 'initial',
        'string' and 'asm' -- kept on ``stack``, plus several boolean
        flags that track whether we are inside a routine/property header
        so modifiers, labels and names get context-sensitive tokens.
        """
        scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
        stack = ['initial']
        in_function_block = False
        in_property_block = False
        was_dot = False
        next_token_is_function = False
        next_token_is_property = False
        collect_labels = False
        block_labels = set()
        # parenthesis / bracket nesting depth inside a header context
        brace_balance = [0, 0]
        while not scanner.eos:
            token = Error
            if stack[-1] == 'initial':
                if scanner.scan(r'\s+'):
                    token = Text
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    # {$...} / (*$...*) are compiler directives
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
                    token = Operator
                    # stop label highlighting on next ";"
                    if collect_labels and scanner.match == ';':
                        collect_labels = False
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                    # abort function naming ``foo = Function(...)``
                    next_token_is_function = False
                    # if we are in a function block we count the open
                    # braces because otherwise it's impossible to
                    # determine the end of the modifier context
                    if in_function_block or in_property_block:
                        if scanner.match == '(':
                            brace_balance[0] += 1
                        elif scanner.match == ')':
                            brace_balance[0] -= 1
                        elif scanner.match == '[':
                            brace_balance[1] += 1
                        elif scanner.match == ']':
                            brace_balance[1] -= 1
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    # Pascal is case-insensitive; all lookups use lowercase
                    lowercase_name = scanner.match.lower()
                    if lowercase_name == 'result':
                        token = Name.Builtin.Pseudo
                    elif lowercase_name in self.keywords:
                        token = Keyword
                        # if we are in a special block and a
                        # block ending keyword occurs (and the parenthesis
                        # is balanced) we end the current block context
                        if (in_function_block or in_property_block) and \
                           lowercase_name in self.BLOCK_KEYWORDS and \
                           brace_balance[0] <= 0 and \
                           brace_balance[1] <= 0:
                            in_function_block = False
                            in_property_block = False
                            brace_balance = [0, 0]
                            block_labels = set()
                        if lowercase_name in ('label', 'goto'):
                            collect_labels = True
                        elif lowercase_name == 'asm':
                            stack.append('asm')
                        elif lowercase_name == 'property':
                            in_property_block = True
                            next_token_is_property = True
                        elif lowercase_name in ('procedure', 'operator',
                                                'function', 'constructor',
                                                'destructor'):
                            in_function_block = True
                            next_token_is_function = True
                    # we are in a function block and the current name
                    # is in the set of registered modifiers. highlight
                    # it as pseudo keyword
                    elif in_function_block and \
                         lowercase_name in self.FUNCTION_MODIFIERS:
                        token = Keyword.Pseudo
                    # if we are in a property highlight some more
                    # modifiers
                    elif in_property_block and \
                         lowercase_name in ('read', 'write'):
                        token = Keyword.Pseudo
                        next_token_is_function = True
                    # if the last iteration set next_token_is_function
                    # to true we now want this name highlighted as
                    # function. so do that and reset the state
                    elif next_token_is_function:
                        # Look if the next token is a dot. If yes it's
                        # not a function, but a class name and the
                        # part after the dot a function name
                        if scanner.test(r'\s*\.\s*'):
                            token = Name.Class
                        # it's not a dot, our job is done
                        else:
                            token = Name.Function
                            next_token_is_function = False
                    # same for properties
                    elif next_token_is_property:
                        token = Name.Property
                        next_token_is_property = False
                    # Highlight this token as label and add it
                    # to the list of known labels
                    elif collect_labels:
                        token = Name.Label
                        block_labels.add(scanner.match.lower())
                    # name is in list of known labels
                    elif lowercase_name in block_labels:
                        token = Name.Label
                    elif lowercase_name in self.BUILTIN_TYPES:
                        token = Keyword.Type
                    elif lowercase_name in self.DIRECTIVES:
                        token = Keyword.Pseudo
                    # builtins are just builtins if the token
                    # before isn't a dot
                    elif not was_dot and lowercase_name in self.builtins:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
                    token = String.Char
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    # if the stack depth is deeper than once, pop
                    if len(stack) > 1:
                        stack.pop()
                    scanner.get_char()
            elif stack[-1] == 'string':
                if scanner.scan(r"''"):
                    # '' is the escaped quote inside a Pascal string
                    token = String.Escape
                elif scanner.scan(r"'"):
                    token = String
                    stack.pop()
                elif scanner.scan(r"[^']*"):
                    token = String
                else:
                    scanner.get_char()
                    stack.pop()
            elif stack[-1] == 'asm':
                if scanner.scan(r'\s+'):
                    token = Text
                elif scanner.scan(r'end'):
                    token = Keyword
                    stack.pop()
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
                    token = Name.Label
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    lowercase_name = scanner.match.lower()
                    if lowercase_name in self.ASM_INSTRUCTIONS:
                        token = Keyword
                    elif lowercase_name in self.ASM_REGISTERS:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
                    token = Operator
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    scanner.get_char()
                    stack.pop()
            # save the dot!!!11
            if scanner.match.strip():
                was_dot = scanner.match == '.'
            yield scanner.start_pos, token, scanner.match or ''
class DylanLexer(RegexLexer):
    """
    For the `Dylan <http://www.opendylan.org/>`_ language.

    *New in Pygments 0.7.*
    """
    name = 'Dylan'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl']
    mimetypes = ['text/x-dylan']
    flags = re.DOTALL
    tokens = {
        'root': [
            # defining words and adjectives
            (r'\b(subclass|abstract|block|c(on(crete|stant)|lass)|domain'
             r'|ex(c(eption|lude)|port)|f(unction(al)?)|generic|handler'
             r'|i(n(herited|line|stance|terface)|mport)|library|m(acro|ethod)'
             r'|open|primary|sealed|si(deways|ngleton)|slot'
             r'|v(ariable|irtual))\b', Name.Builtin),
            # Dylan convention: type names are written <like-this>
            (r'<\w+>', Keyword.Type),
            (r'//.*?\n', Comment.Single),
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            # statement words
            (r'=>|\b(a(bove|fterwards)|b(e(gin|low)|y)|c(ase|leanup|reate)'
             r'|define|else(if)?|end|f(inally|or|rom)|i[fn]|l(et|ocal)|otherwise'
             r'|rename|s(elect|ignal)|t(hen|o)|u(n(less|til)|se)|wh(en|ile))\b',
             Keyword),
            # keyword arguments (``name:``)
            (r'([ \t])([!\$%&\*\/:<=>\?~_^a-zA-Z0-9.+\-]*:)',
             bygroups(Text, Name.Variable)),
            # a name directly followed by ``(`` is taken as a function call
            (r'([ \t]*)(\S+[^:])([ \t]*)(\()([ \t]*)',
             bygroups(Text, Name.Function, Text, Punctuation, Text)),
            (r'-?[0-9.]+', Number),
            (r'[(),;]', Punctuation),
            (r'\$[a-zA-Z0-9-]+', Name.Constant),
            (r'[!$%&*/:<>=?~^.+\[\]{}-]+', Operator),
            (r'\s+', Text),
            (r'#"[a-zA-Z0-9-]+"', Keyword),
            (r'#[a-zA-Z0-9-]+', Keyword),
            (r'#(\(|\[)', Punctuation),
            (r'[a-zA-Z0-9-_]+', Name.Variable),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
    }
class ObjectiveCLexer(RegexLexer):
    """
    For Objective-C source code with preprocessor directives.
    """
    name = 'Objective-C'
    aliases = ['objective-c', 'objectivec', 'obj-c', 'objc']
    # XXX: objc has .h files too :-/
    filenames = ['*.m']
    mimetypes = ['text/x-objective-c']
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r':\s*/[*].*?[*]/\s*'
    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'(L|@)?"', String, 'string'),
            (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'(auto|break|case|const|continue|default|do|else|enum|extern|'
             r'for|goto|if|register|restricted|return|sizeof|static|struct|'
             r'switch|typedef|union|volatile|virtual|while|in|@selector|'
             r'@private|@protected|@public|@encode|'
             r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
             r'@synthesize|@dynamic|@optional)\b', Keyword),
            (r'(int|long|float|short|double|char|unsigned|signed|void|'
             r'id|BOOL|IBOutlet|IBAction|SEL)\b', Keyword.Type),
            (r'(_{0,2}inline|naked|restrict|thread|typename)\b',
             Keyword.Reserved),
            (r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
             r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
            (r'(TRUE|FALSE|nil|NULL)\b', Name.Builtin),
            ('[a-zA-Z$_][a-zA-Z0-9$_]*:(?!:)', Name.Label),
            ('[a-zA-Z$_][a-zA-Z0-9$_]*', Name),
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             ('#pop', 'classname')),
            (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
             ('#pop', 'forward_classname')),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z$_][a-zA-Z0-9$_]*)'        # method name
             r'(\s*\([^;]*?\))'                   # signature
             r'(' + _ws + r')?({)',
             bygroups(using(this), Name.Function,
                      using(this), Text, Punctuation),
             'function'),
            # methods
            (r'^([-+])(\s*)'                      # method marker
             r'(\(.*?\))?(\s*)'                   # return type
             r'([a-zA-Z$_][a-zA-Z0-9$_]*:?)',     # begin of method name
             bygroups(Keyword, Text, using(this),
                      Text, Name.Function),
             'method'),
            # function declarations
            (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z$_][a-zA-Z0-9$_]*)'        # method name
             r'(\s*\([^;]*?\))'                   # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function,
                      using(this), Text, Punctuation)),
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             'classname'),
            (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text),
             'forward_classname'),
            (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
            ('', Text, 'statement'),
        ],
        'classname' : [
            # interface definition that inherits
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
             bygroups(Name.Class, Text, Name.Class), '#pop'),
            # interface definition for a category
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
             bygroups(Name.Class, Text, Name.Label), '#pop'),
            # simple interface / implementation
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
        ],
        'forward_classname' : [
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
             bygroups(Name.Class, Text), 'forward_classname'),
            ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
             bygroups(Name.Class, Text), '#pop')
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            ('{', Punctuation, '#push'),
            ('}', Punctuation, '#pop'),
        ],
        'method': [
            include('whitespace'),
            # parameter type and name: (type)name
            (r'(\(.*?\))([a-zA-Z$_][a-zA-Z0-9$_]*)', bygroups(using(this),
                                                              Name.Variable)),
            (r'[a-zA-Z$_][a-zA-Z0-9$_]*:', Name.Function),
            (';', Punctuation, '#pop'),
            ('{', Punctuation, 'function'),
            ('', Text, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            # FIX: leave the inactive block when an #else/#elif branch
            # begins, so the code that follows it is lexed normally.  The
            # C-family lexer's 'if0' state in this file has the same rule;
            # it was missing here, which kept everything after ``#else``
            # inside an ``#if 0`` block highlighted as a comment.
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
    def analyse_text(text):
        """Heuristic used by ``guess_lexer``: Objective-C directives or
        ``@"..."`` string literals / message sends are strong hints."""
        if '@import' in text or '@interface' in text or \
           '@implementation' in text:
            return True
        elif '@"' in text: # strings
            return True
        elif re.match(r'\[[a-zA-Z0-9.]:', text): # message
            return True
        return False
class FortranLexer(RegexLexer):
    """
    Lexer for FORTRAN 90 code.
    *New in Pygments 0.10.*
    """
    name = 'Fortran'
    aliases = ['fortran']
    filenames = ['*.f', '*.f90', '*.F', '*.F90']
    mimetypes = ['text/x-fortran']
    # Fortran keywords are case-insensitive, so match everything ignoring case.
    flags = re.IGNORECASE
    # Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
    # Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
    # Logical (?): NOT, AND, OR, EQV, NEQV
    # Builtins:
    # http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
    tokens = {
        'root': [
            # '!' starts a comment running to end of line.
            (r'!.*\n', Comment),
            include('strings'),
            include('core'),
            # Anything identifier-shaped that is not a keyword/intrinsic.
            (r'[a-z][a-z0-9_]*', Name.Variable),
            include('nums'),
            (r'[\s]+', Text),
        ],
        'core': [
            # Statements
            (r'\b(ACCEPT|ALLOCATABLE|ALLOCATE|ARRAY|ASSIGN|ASYNCHRONOUS|'
             r'BACKSPACE|BIND|BLOCK DATA|BYTE|CALL|CASE|CLOSE|COMMON|CONTAINS|'
             r'CONTINUE|CYCLE|DATA|DEALLOCATE|DECODE|DEFERRED|DIMENSION|DO|'
             r'ELSE|ENCODE|END FILE|ENDIF|END|ENTRY|ENUMERATOR|EQUIVALENCE|'
             r'EXIT|EXTERNAL|EXTRINSIC|FINAL|FORALL|FORMAT|FUNCTION|GENERIC|'
             r'GOTO|IF|IMPLICIT|IMPORT|INCLUDE|INQUIRE|INTENT|INTERFACE|'
             r'INTRINSIC|MODULE|NAMELIST|NULLIFY|NONE|NON_INTRINSIC|'
             r'NON_OVERRIDABLE|NOPASS|OPEN|OPTIONAL|OPTIONS|PARAMETER|PASS|'
             r'PAUSE|POINTER|PRINT|PRIVATE|PROGRAM|PROTECTED|PUBLIC|PURE|READ|'
             r'RECURSIVE|RETURN|REWIND|SAVE|SELECT|SEQUENCE|STOP|SUBROUTINE|'
             r'TARGET|THEN|TYPE|USE|VALUE|VOLATILE|WHERE|WRITE|WHILE)\s*\b',
             Keyword),
            # Data Types
            (r'\b(CHARACTER|COMPLEX|DOUBLE PRECISION|DOUBLE COMPLEX|INTEGER|'
             r'LOGICAL|REAL|C_INT|C_SHORT|C_LONG|C_LONG_LONG|C_SIGNED_CHAR|'
             r'C_SIZE_T|C_INT8_T|C_INT16_T|C_INT32_T|C_INT64_T|C_INT_LEAST8_T|'
             r'C_INT_LEAST16_T|C_INT_LEAST32_T|C_INT_LEAST64_T|C_INT_FAST8_T|'
             r'C_INT_FAST16_T|C_INT_FAST32_T|C_INT_FAST64_T|C_INTMAX_T|'
             r'C_INTPTR_T|C_FLOAT|C_DOUBLE|C_LONG_DOUBLE|C_FLOAT_COMPLEX|'
             r'C_DOUBLE_COMPLEX|C_LONG_DOUBLE_COMPLEX|C_BOOL|C_CHAR|C_PTR|'
             r'C_FUNPTR)\s*\b',
             Keyword.Type),
            # Operators
            (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
            (r'(::)', Keyword.Declaration),
            (r'[(),:&%;]', Punctuation),
            # Intrinsics
            (r'\b(Abort|Abs|Access|AChar|ACos|AdjustL|AdjustR|AImag|AInt|Alarm|'
             r'All|Allocated|ALog|AMax|AMin|AMod|And|ANInt|Any|ASin|Associated|'
             r'ATan|BesJ|BesJN|BesY|BesYN|Bit_Size|BTest|CAbs|CCos|Ceiling|'
             r'CExp|Char|ChDir|ChMod|CLog|Cmplx|Command_Argument_Count|Complex|'
             r'Conjg|Cos|CosH|Count|CPU_Time|CShift|CSin|CSqRt|CTime|C_Funloc|'
             r'C_Loc|C_Associated|C_Null_Ptr|C_Null_Funptr|C_F_Pointer|'
             r'C_Null_Char|C_Alert|C_Backspace|C_Form_Feed|C_New_Line|'
             r'C_Carriage_Return|C_Horizontal_Tab|C_Vertical_Tab|'
             r'DAbs|DACos|DASin|DATan|Date_and_Time|DbesJ|'
             r'DbesJ|DbesJN|DbesY|DbesY|DbesYN|Dble|DCos|DCosH|DDiM|DErF|DErFC|'
             r'DExp|Digits|DiM|DInt|DLog|DLog|DMax|DMin|DMod|DNInt|Dot_Product|'
             r'DProd|DSign|DSinH|DSin|DSqRt|DTanH|DTan|DTime|EOShift|Epsilon|'
             r'ErF|ErFC|ETime|Exit|Exp|Exponent|Extends_Type_Of|FDate|FGet|'
             r'FGetC|Float|Floor|Flush|FNum|FPutC|FPut|Fraction|FSeek|FStat|'
             r'FTell|GError|GetArg|Get_Command|Get_Command_Argument|'
             r'Get_Environment_Variable|GetCWD|GetEnv|GetGId|GetLog|GetPId|'
             r'GetUId|GMTime|HostNm|Huge|IAbs|IAChar|IAnd|IArgC|IBClr|IBits|'
             r'IBSet|IChar|IDate|IDiM|IDInt|IDNInt|IEOr|IErrNo|IFix|Imag|'
             r'ImagPart|Index|Int|IOr|IRand|IsaTty|IShft|IShftC|ISign|'
             r'Iso_C_Binding|Is_Iostat_End|Is_Iostat_Eor|ITime|Kill|Kind|'
             r'LBound|Len|Len_Trim|LGe|LGt|Link|LLe|LLt|LnBlnk|Loc|Log|'
             r'Logical|Long|LShift|LStat|LTime|MatMul|Max|MaxExponent|MaxLoc|'
             r'MaxVal|MClock|Merge|Move_Alloc|Min|MinExponent|MinLoc|MinVal|'
             r'Mod|Modulo|MvBits|Nearest|New_Line|NInt|Not|Or|Pack|PError|'
             r'Precision|Present|Product|Radix|Rand|Random_Number|Random_Seed|'
             r'Range|Real|RealPart|Rename|Repeat|Reshape|RRSpacing|RShift|'
             r'Same_Type_As|Scale|Scan|Second|Selected_Int_Kind|'
             r'Selected_Real_Kind|Set_Exponent|Shape|Short|Sign|Signal|SinH|'
             r'Sin|Sleep|Sngl|Spacing|Spread|SqRt|SRand|Stat|Sum|SymLnk|'
             r'System|System_Clock|Tan|TanH|Time|Tiny|Transfer|Transpose|Trim|'
             r'TtyNam|UBound|UMask|Unlink|Unpack|Verify|XOr|ZAbs|ZCos|ZExp|'
             r'ZLog|ZSin|ZSqRt)\s*\b',
             Name.Builtin),
            # Booleans
            (r'\.(true|false)\.', Name.Builtin),
            # Comparing Operators
            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
        ],
        'strings': [
            # Double- and single-quoted strings; (?s) lets them span lines.
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
        ],
        'nums': [
            # Integer literal (not followed by a decimal point or exponent).
            (r'\d+(?![.Ee])', Number.Integer),
            # Real literals with optional exponent, leading or trailing digits.
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
        ],
    }
class GLShaderLexer(RegexLexer):
    """
    GLSL (OpenGL Shader) lexer.
    *New in Pygments 1.1.*
    """
    name = 'GLSL'
    aliases = ['glsl']
    filenames = ['*.vert', '*.frag', '*.geo']
    mimetypes = ['text/x-glslsrc']
    tokens = {
        'root': [
            # Preprocessor directives and comments.
            (r'^#.*', Comment.Preproc),
            (r'//.*', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            # Operators (arithmetic, bitwise, comparison, logical).
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator), # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),\[\]]', Punctuation),
            #FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            # Current GLSL keywords and built-in types.
            (r'\b(attribute|const|uniform|varying|centroid|break|continue|'
             r'do|for|while|if|else|in|out|inout|float|int|void|bool|true|'
             r'false|invariant|discard|return|mat[234]|mat[234]x[234]|'
             r'vec[234]|[ib]vec[234]|sampler[123]D|samplerCube|'
             r'sampler[12]DShadow|struct)\b', Keyword),
            # Keywords reserved by the GLSL spec for future use.
            (r'\b(asm|class|union|enum|typedef|template|this|packed|goto|'
             r'switch|default|inline|noinline|volatile|public|static|extern|'
             r'external|interface|long|short|double|half|fixed|unsigned|'
             r'lowp|mediump|highp|precision|input|output|hvec[234]|'
             r'[df]vec[234]|sampler[23]DRect|sampler2DRectShadow|sizeof|'
             r'cast|namespace|using)\b', Keyword), #future use
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
            (r'\.', Punctuation),
            (r'\s+', Text),
        ],
    }
class PrologLexer(RegexLexer):
    """
    Lexer for Prolog files.
    """
    name = 'Prolog'
    aliases = ['prolog']
    filenames = ['*.prolog', '*.pro', '*.pl']
    mimetypes = ['text/x-prolog']
    # re.UNICODE so \w / \s in patterns cover the non-ASCII atom ranges below.
    flags = re.UNICODE
    tokens = {
        'root': [
            (r'^#.*', Comment.Single),
            # Block comments may nest, handled in the 'nested-comment' state.
            (r'/\*', Comment.Multiline, 'nested-comment'),
            (r'%.*', Comment.Single),
            (r'[0-9]+', Number),
            (r'[\[\](){}|.,;!]', Punctuation),
            (r':-|-->', Punctuation),
            (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
             r'\\[0-7]+\\|\\[\w\W]|[^"])*"', String.Double),
            (r"'(?:''|[^'])*'", String.Atom), # quoted atom
            # Needs to not be followed by an atom.
            #(r'=(?=\s|[a-zA-Z\[])', Operator),
            (r'(is|<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
             Operator),
            (r'(mod|div|not)\b', Operator),
            (r'_', Keyword), # The don't-care variable
            (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
            # Lowercase/Unicode atom immediately followed by :- or --> is a
            # predicate (function) definition head.
            (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             u'(\\s*)(:-|-->)',
             bygroups(Name.Function, Text, Operator)), # function defn
            (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             u'(\\s*)(\\()',
             bygroups(Name.Function, Text, Punctuation)),
            (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             u'[a-zA-Z0-9_$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
             String.Atom), # atom, characters
            # This one includes !
            (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
             String.Atom), # atom, graphics
            # Variables start with an uppercase letter or underscore.
            (r'[A-Z_][A-Za-z0-9_]*', Name.Variable),
            (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
        ],
        'nested-comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            # A nested /* pushes this state again so comments balance.
            (r'/\*', Comment.Multiline, '#push'),
            (r'[^*/]+', Comment.Multiline),
            (r'[*/]', Comment.Multiline),
        ],
    }
    def analyse_text(text):
        # The rule neck ':-' anywhere is taken as evidence of Prolog source.
        return ':-' in text
class CythonLexer(RegexLexer):
    """
    For Pyrex and `Cython <http://cython.org>`_ source code.
    *New in Pygments 1.1.*
    """
    name = 'Cython'
    aliases = ['cython', 'pyx']
    filenames = ['*.pyx', '*.pxd', '*.pxi']
    mimetypes = ['text/x-cython', 'application/x-cython']
    tokens = {
        'root': [
            (r'\n', Text),
            # Module-level docstrings at the start of a line.
            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # Cython cast syntax: <type>expr
            (r'(<)([a-zA-Z0-9.?]+)(>)',
             bygroups(Punctuation, Keyword.Type, Punctuation)),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
            # Old Pyrex integer-for-loop: for i from 0 <= i < n:
            (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
             bygroups(Keyword, Number.Integer, Operator, Name, Operator,
                      Name, Punctuation)),
            include('keywords'),
            # Definition keywords push a sub-state to lex the defined name.
            (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
            (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
            (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
            include('builtins'),
            include('backtick'),
            # Raw string prefixes go straight to the string states (no
            # escape handling); plain/unicode prefixes add 'stringescape'.
            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
            ('[uU]?"', String, combined('stringescape', 'dqs')),
            ("[uU]?'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (r'(assert|break|by|continue|ctypedef|del|elif|else|except\??|exec|'
             r'finally|for|gil|global|if|include|lambda|nogil|pass|print|raise|'
             r'return|try|while|yield|as|with)\b', Keyword),
            # Cython compile-time (conditional compilation) directives.
            (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
        ],
        'builtins': [
            # (?<!\.) avoids highlighting attribute accesses like foo.abs.
            (r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
             r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
             r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
             r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
             r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
             r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
             r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
             r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
             r'vars|xrange|zip)\b', Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
             r')\b', Name.Builtin.Pseudo),
            (r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
             r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
             r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
             r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
             r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
             r'NotImplemented|NotImplementedError|OSError|OverflowError|'
             r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
             r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
             r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
             r'TypeError|UnboundLocalError|UnicodeDecodeError|'
             r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
             r'UnicodeWarning|UserWarning|ValueError|Warning|ZeroDivisionError'
             r')\b', Name.Exception),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@[a-zA-Z0-9_]+', Name.Decorator),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'cdef': [
            # Lexes the remainder of a cdef/cpdef line: modifiers, the
            # declared name(s), and optional C return type tokens.
            (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
            (r'(struct|enum|union|class)\b', Keyword),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(?=[(:#=]|$)',
             bygroups(Name.Function, Text), '#pop'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(,)',
             bygroups(Name.Function, Text, Punctuation)),
            (r'from\b', Keyword, '#pop'),
            (r'as\b', Keyword),
            (r':', Punctuation, '#pop'),
            (r'(?=["\'])', Text, '#pop'),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Keyword.Type),
            (r'.', Text),
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            (r'', Text, '#pop') # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
            (r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
            # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
            (r'', Text, '#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            # %-style format specifiers inside string literals.
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
class ValaLexer(RegexLexer):
    """
    For Vala source code with preprocessor directives.
    *New in Pygments 1.1.*
    """
    name = 'Vala'
    aliases = ['vala', 'vapi']
    filenames = ['*.vala', '*.vapi']
    mimetypes = ['text/x-vala']
    tokens = {
        'whitespace': [
            # '#if 0' regions are lexed as comments in the 'if0' state.
            (r'^\s*#if\s+0', Comment.Preproc, 'if0'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            # Simple, argument-less code attributes like [Compact].
            (r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
             bygroups(Punctuation, Name.Decorator, Punctuation)),
            # TODO: "correctly" parse complex code attributes
            (r'(\[)(CCode|(?:Integer|Floating)Type)',
             bygroups(Punctuation, Name.Decorator)),
            (r'[()\[\],.]', Punctuation),
            (r'(as|base|break|case|catch|construct|continue|default|delete|do|'
             r'else|enum|finally|for|foreach|get|if|in|is|lock|new|out|params|'
             r'return|set|sizeof|switch|this|throw|try|typeof|while|yield)\b',
             Keyword),
            (r'(abstract|const|delegate|dynamic|ensures|extern|inline|internal|'
             r'override|owned|private|protected|public|ref|requires|signal|'
             r'static|throws|unowned|var|virtual|volatile|weak|yields)\b',
             Keyword.Declaration),
            (r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
             'namespace'),
            (r'(class|errordomain|interface|struct)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Name.Attribute)),
            # void is an actual keyword, others are in glib-2.0.vapi
            (r'(void|bool|char|double|float|int|int8|int16|int32|int64|long|'
             r'short|size_t|ssize_t|string|time_t|uchar|uint|uint8|uint16|'
             r'uint32|uint64|ulong|unichar|ushort)\b', Keyword.Type),
            (r'(true|false|null)\b', Name.Builtin),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'root': [
            include('whitespace'),
            # Empty match: everything else is handled in 'statement'.
            ('', Text, 'statement'),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'if0': [
            # Nested #if blocks push/pop so the matching #endif is found.
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ],
        'class': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'namespace': [
            (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
        ],
    }
class OocLexer(RegexLexer):
    """
    For `Ooc <http://ooc-lang.org/>`_ source code
    *New in Pygments 1.2.*
    """
    name = 'Ooc'
    aliases = ['ooc']
    filenames = ['*.ooc']
    mimetypes = ['text/x-ooc']
    tokens = {
        'root': [
            (r'\b(class|interface|implement|abstract|extends|from|'
             r'this|super|new|const|final|static|import|use|extern|'
             r'inline|proto|break|continue|fallthrough|operator|if|else|for|'
             r'while|do|switch|case|as|in|version|return|true|false|null)\b',
             Keyword),
            (r'include\b', Keyword, 'include'),
            # 'cover X from Y' C-type covers; '*@' suffixes mark pointers.
            (r'(cover)([ \t]+)(from)([ \t]+)([a-zA-Z0-9_]+[*@]?)',
             bygroups(Keyword, Text, Keyword, Text, Name.Class)),
            # Function suffix (overload tag) introduced by '~'.
            (r'(func)((?:[ \t]|\\\n)+)(~[a-z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Function)),
            (r'\bfunc\b', Keyword),
            # Note: %= and ^= not listed on http://ooc-lang.org/syntax
            (r'//.*', Comment),
            (r'(?s)/\*.*?\*/', Comment.Multiline),
            (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
             r'&&?|\|\|?|\^=?)', Operator),
            (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
                                                 Name.Function)),
            (r'[A-Z][A-Z0-9_]+', Name.Constant),
            (r'[A-Z][a-zA-Z0-9_]*([@*]|\[[ \t]*\])?', Name.Class),
            # Identifier directly followed by '(' is a call/definition.
            (r'([a-z][a-zA-Z0-9_]*(?:~[a-z][a-zA-Z0-9_]*)?)((?:[ \t]|\\\n)*)(?=\()',
             bygroups(Name.Function, Text)),
            (r'[a-z][a-zA-Z0-9_]*', Name.Variable),
            # : introduces types
            (r'[:(){}\[\];,]', Punctuation),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'0c[0-9]+', Number.Oct),
            (r'0b[01]+', Number.Binary),
            (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
            (r'[0-9_]+', Number.Decimal),
            (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\"])*"',
             String.Double),
            (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'@', Punctuation), # pointer dereference
            (r'\.', Punctuation), # imports or chain operator
            (r'\\[ \t\n]', Text),
            (r'[ \t]+', Text),
        ],
        'include': [
            # Include targets are paths; a ';' or newline ends the directive.
            (r'[\w/]+', Name),
            (r',', Punctuation),
            (r'[ \t]', Text),
            (r'[;\n]', Text, '#pop'),
        ],
    }
class GoLexer(RegexLexer):
    """
    For `Go <http://golang.org>`_ source.
    """
    name = 'Go'
    filenames = ['*.go']
    aliases = ['go']
    mimetypes = ['text/x-gosrc']
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuations
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b', Keyword.Declaration),
            (r'(break|default|select|case|defer|go'
             r'|else|goto|switch|fallthrough|if|range'
             r'|continue|for|return)\b', Keyword
            ),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            (r'(uint|uint8|uint16|uint32|uint64'
             r'|int|int8|int16|int32|int64'
             r'|float|float32|float64'
             r'|complex64|complex128|byte|rune'
             r'|string|bool|error|uintptr'
             r'|print|println|panic|recover|close|complex|real|imag'
             r'|len|cap|append|copy|delete|new|make)\b(\()', bygroups(Name.Builtin, Punctuation)
            ),
            (r'(uint|uint8|uint16|uint32|uint64'
             r'|int|int8|int16|int32|int64'
             r'|float|float32|float64'
             r'|complex64|complex128|byte|rune'
             r'|string|bool|error|uintptr)\b', Keyword.Type
            ),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char
            ),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\"|[^"])*"', String),
            # Tokens
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[a-zA-Z_]\w*', Name.Other),
        ]
    }
class FelixLexer(RegexLexer):
    """
    For `Felix <http://www.felix-lang.org>`_ source code.
    *New in Pygments 1.2.*
    """
    name = 'Felix'
    aliases = ['felix', 'flx']
    filenames = ['*.flx', '*.flxh']
    mimetypes = ['text/x-felix']
    # Preprocessor directive names handled in the 'macro' state.
    preproc = [
        'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
    ]
    keywords = [
        '_', '_deref', 'all', 'as',
        'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
        'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
        'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
        'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
        'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
        'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
        'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
        'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
        'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
        'when', 'whilst', 'with', 'yield',
    ]
    keyword_directives = [
        '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
        'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
        'package', 'private', 'pod', 'property', 'public', 'publish',
        'requires', 'todo', 'virtual', 'use',
    ]
    keyword_declarations = [
        'def', 'let', 'ref', 'val', 'var',
    ]
    keyword_types = [
        'unit', 'void', 'any', 'bool',
        'byte', 'offset',
        'address', 'caddress', 'cvaddress', 'vaddress',
        'tiny', 'short', 'int', 'long', 'vlong',
        'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
        'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'uint64',
        'float', 'double', 'ldouble',
        'complex', 'dcomplex', 'lcomplex',
        'imaginary', 'dimaginary', 'limaginary',
        'char', 'wchar', 'uchar',
        'charp', 'charcp', 'ucharp', 'ucharcp',
        'string', 'wstring', 'ustring',
        'cont',
        'array', 'varray', 'list',
        'lvalue', 'opt', 'slice',
    ]
    keyword_constants = [
        'false', 'true',
    ]
    operator_words = [
        'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
    ]
    name_builtins = [
        '_svc', 'while',
    ]
    name_pseudo = [
        'root', 'self', 'this',
    ]
    # Optional suffix on decimal/binary/octal/hex integer literals.
    decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
    tokens = {
        'root': [
            include('whitespace'),
            # Keywords
            (r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword,
             'funcname'),
            (r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'),
            (r'(instance|module|typeclass)\b', Keyword, 'modulename'),
            (r'(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator),
            (r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration),
            (r'(%s)\b' % '|'.join(keyword_types), Keyword.Type),
            (r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant),
            # Operators
            include('operators'),
            # Float Literal
            # -- Hex Float
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
             Number.Float),
            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+%s' % decimal_suffixes, Number),
            # -- Octal
            (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
            # Strings
            # Raw prefixes skip escape handling; other prefixes combine the
            # 'stringescape' state with the matching quote state.
            ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
            ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
            ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
            ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
            ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
            ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
            ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
            ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
            # Punctuation
            (r'[\[\]{}:(),;?]', Punctuation),
            # Labels
            (r'[a-zA-Z_]\w*:>', Name.Label),
            # Identifiers
            (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
            (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            include('comment'),
            # Preprocessor
            (r'#\s*if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),
        ],
        'operators': [
            (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
            (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
        ],
        'comment': [
            (r'//(.*?)\n', Comment.Single),
            # Block comments may nest; handled in 'comment2'.
            (r'/[*]', Comment.Multiline, 'comment2'),
        ],
        'comment2': [
            (r'[^\/*]', Comment.Multiline),
            (r'/[*]', Comment.Multiline, '#push'),
            (r'[*]/', Comment.Multiline, '#pop'),
            (r'[\/*]', Comment.Multiline),
        ],
        'if0': [
            # Nested #if blocks balance via push/pop.
            (r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
            (r'.*?\n', Comment),
        ],
        'macro': [
            include('comment'),
            (r'(import|include)(\s+)(<[^>]*?>)',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'(import|include)(\s+)("[^"]*?")',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r"(import|include)(\s+)('[^']*?')",
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'[^/\n]+', Comment.Preproc),
            ##(r'/[*](.|\n)*?[*]/', Comment),
            ##(r'//.*?\n', Comment, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'funcname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            # anonymous functions
            (r'(?=\()', Text, '#pop'),
        ],
        'classname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # anonymous classes
            (r'(?=\{)', Text, '#pop'),
        ],
        'modulename': [
            include('whitespace'),
            # Optional type-variable list in brackets before the name.
            (r'\[', Punctuation, ('modulename2', 'tvarlist')),
            (r'', Error, 'modulename2'),
        ],
        'modulename2': [
            include('whitespace'),
            (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
        ],
        'tvarlist': [
            include('whitespace'),
            include('operators'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(with|where)\b', Keyword),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            # %-style format specifiers inside string literals.
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            # included here again for raw strings
            (r'\\\\|\\"|\\\n', String.Escape),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            # included here again for raw strings
            (r"\\\\|\\'|\\\n", String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
class AdaLexer(RegexLexer):
    """
    For Ada source code.

    Declarations, attributes, based numeric literals and labelled
    statements are recognised via dedicated sub-states ('subprogram',
    'type_def', 'package', ...).

    *New in Pygments 1.3.*
    """
    name = 'Ada'
    # Fix: a comma was missing between 'ada95' and 'ada2005', so implicit
    # string concatenation collapsed them into one bogus alias 'ada95ada2005'.
    aliases = ['ada', 'ada95', 'ada2005']
    filenames = ['*.adb', '*.ads', '*.ada']
    mimetypes = ['text/x-ada']
    flags = re.MULTILINE | re.I  # Ada is case-insensitive
    tokens = {
        'root': [
            # Whitespace other than newlines. (An identical duplicate of
            # this rule further down was removed: it was unreachable.)
            (r'[^\S\n]+', Text),
            # '--' comments run to end of line.
            (r'--.*?\n', Comment.Single),
            # Subprogram declarations: the name is lexed in 'subprogram'.
            (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
            (r'(subtype|type)(\s+)([a-z0-9_]+)',
             bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
            (r'task|protected', Keyword.Declaration),
            (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
            (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
            (r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text,
                                                       Comment.Preproc)),
            (r'(true|false|null)\b', Keyword.Constant),
            # Predefined types from package Standard.
            (r'(Byte|Character|Float|Integer|Long_Float|Long_Integer|'
             r'Long_Long_Float|Long_Long_Integer|Natural|Positive|Short_Float|'
             r'Short_Integer|Short_Short_Float|Short_Short_Integer|String|'
             r'Wide_String|Duration)\b', Keyword.Type),
            # Word operators; 'and then' / 'or else' are the short-circuit
            # forms. Fix: '(\s+else)' was mandatory, so a bare 'or' fell
            # through to the reserved-word rule instead of Operator.Word —
            # made it optional to mirror 'and(\s+then)?'.
            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
            (r'generic|private', Keyword.Declaration),
            (r'package', Keyword.Declaration, 'package'),
            (r'array\b', Keyword.Reserved, 'array_def'),
            (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
            # 'name : constant' object declarations.
            (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)',
             bygroups(Name.Constant, Text, Punctuation, Text,
                      Keyword.Reserved)),
            # <<label>> statement labels.
            (r'<<[a-z0-9_]+>>', Name.Label),
            # 'name :' before declare/begin/loop/for/while is a block label.
            (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
             bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
            (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|'
             r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|'
             r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|'
             r'null|of|or|others|out|overriding|pragma|protected|raise|range|'
             r'record|renames|requeue|return|reverse|select|separate|subtype|'
             r'synchronized|task|tagged|terminate|then|type|until|when|while|'
             r'xor)\b',
             Keyword.Reserved),
            (r'"[^"]*"', String),
            include('attribute'),
            include('numbers'),
            (r"'[^']'", String.Character),
            (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))),
            (r"(<>|=>|:=|[()|:;,.'])", Punctuation),
            (r'[*<>+=/&-]', Operator),
            (r'\n+', Text),
        ],
        'numbers' : [
            # Based literals like 16#FF#, then reals, then plain integers.
            (r'[0-9_]+#[0-9a-f]+#', Number.Hex),
            (r'[0-9_]+\.[0-9_]*', Number.Float),
            (r'[0-9_]+', Number.Integer),
        ],
        'attribute' : [
            # Attribute references such as X'First, T'Image.
            (r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)),
        ],
        'subprogram' : [
            (r'\(', Punctuation, ('#pop', 'formal_part')),
            (r';', Punctuation, '#pop'),
            (r'is\b', Keyword.Reserved, '#pop'),
            # Either an operator symbol ("..") or a plain identifier.
            (r'"[^"]+"|[a-z0-9_]+', Name.Function),
            include('root'),
        ],
        'end' : [
            ('(if|case|record|loop|select)', Keyword.Reserved),
            ('"[^"]+"|[a-zA-Z0-9_.]+', Name.Function),
            (r'\s+', Text),
            (';', Punctuation, '#pop'),
        ],
        'type_def': [
            (r';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'formal_part'),
            (r'with|and|use', Keyword.Reserved),
            (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
            (r'record\b', Keyword.Reserved, ('formal_part')),
            include('root'),
        ],
        'array_def' : [
            (r';', Punctuation, '#pop'),
            (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text,
                                                   Keyword.Reserved)),
            include('root'),
        ],
        'import': [
            (r'[a-z0-9_.]+', Name.Namespace, '#pop'),
            (r'', Text, '#pop'),
        ],
        'formal_part' : [
            (r'\)', Punctuation, '#pop'),
            (r'[a-z0-9_]+', Name.Variable),
            (r',|:[^=]', Punctuation),
            (r'(in|not|null|out|access)\b', Keyword.Reserved),
            include('root'),
        ],
        'package': [
            ('body', Keyword.Declaration),
            (r'is\s+new|renames', Keyword.Reserved),
            ('is', Keyword.Reserved, '#pop'),
            (';', Punctuation, '#pop'),
            (r'\(', Punctuation, 'package_instantiation'),
            ('([a-zA-Z0-9_.]+)', Name.Class),
            include('root'),
        ],
        'package_instantiation': [
            # 'formal => actual' generic parameter associations.
            (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable,
                                                        Text, Punctuation)),
            (r'[a-z0-9._\'"]', Text),
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
    }
class Modula2Lexer(RegexLexer):
    """
    For `Modula-2 <http://www.modula2.org/>`_ source code.
    Additional options that determine which keywords are highlighted:
    `pim`
        Select PIM Modula-2 dialect (default: True).
    `iso`
        Select ISO Modula-2 dialect (default: False).
    `objm2`
        Select Objective Modula-2 dialect (default: False).
    `gm2ext`
        Also highlight GNU extensions (default: False).
    *New in Pygments 1.3.*
    """
    name = 'Modula-2'
    aliases = ['modula2', 'm2']
    filenames = ['*.def', '*.mod']
    mimetypes = ['text/x-modula2']
    flags = re.MULTILINE | re.DOTALL
    # The regex table below only assigns generic token types; dialect
    # keywords and pervasives are promoted later in get_tokens_unprocessed().
    tokens = {
        'whitespace': [
            (r'\n+', Text), # blank lines
            (r'\s+', Text), # whitespace
        ],
        'identifiers': [
            (r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name),
        ],
        'numliterals': [
            (r'[01]+B', Number.Binary), # binary number (ObjM2)
            (r'[0-7]+B', Number.Oct), # octal number (PIM + ISO)
            (r'[0-7]+C', Number.Oct), # char code (PIM + ISO)
            (r'[0-9A-F]+C', Number.Hex), # char code (ObjM2)
            (r'[0-9A-F]+H', Number.Hex), # hexadecimal number
            (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number
            (r'[0-9]+\.[0-9]+', Number.Float), # real number
            (r'[0-9]+', Number.Integer), # decimal whole number
        ],
        'strings': [
            (r"'(\\\\|\\'|[^'])*'", String), # single quoted string
            (r'"(\\\\|\\"|[^"])*"', String), # double quoted string
        ],
        'operators': [
            (r'[*/+=#~&<>\^-]', Operator),
            (r':=', Operator), # assignment
            (r'@', Operator), # pointer deref (ISO)
            (r'\.\.', Operator), # ellipsis or range
            (r'`', Operator), # Smalltalk message (ObjM2)
            (r'::', Operator), # type conversion (ObjM2)
        ],
        'punctuation': [
            (r'[\(\)\[\]{},.:;|]', Punctuation),
        ],
        'comments': [
            (r'//.*?\n', Comment.Single), # ObjM2
            (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2
            (r'\(\*([^\$].*?)\*\)', Comment.Multiline),
            # TO DO: nesting of (* ... *) comments
        ],
        'pragmas': [
            (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM
            (r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2
        ],
        'root': [
            include('whitespace'),
            include('comments'),
            include('pragmas'),
            include('identifiers'),
            include('numliterals'),
            include('strings'),
            include('operators'),
            include('punctuation'),
        ]
    }
    # Dialect word lists -- the active sets are chosen in __init__ based on
    # the 'iso'/'objm2' options (PIM is the default) plus 'gm2ext'.
    pim_reserved_words = [
        # 40 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION',
        'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR',
        'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD',
        'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED',
        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
        'UNTIL', 'VAR', 'WHILE', 'WITH',
    ]
    pim_pervasives = [
        # 31 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC',
        'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL',
        'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD',
        'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL',
    ]
    iso_reserved_words = [
        # 46 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
        'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY',
        'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN',
        'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER',
        'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY',
        'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
        'WITH',
    ]
    iso_pervasives = [
        # 42 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX',
        'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH',
        'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH',
        'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW',
        'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE',
        # NOTE(review): 'UNINTERRUBTIBLE' below looks like a typo for
        # 'UNINTERRUPTIBLE' -- kept as-is; confirm against upstream Pygments
        # and the ISO standard before changing.
        'TRUE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL',
    ]
    objm2_reserved_words = [
        # base language, 42 reserved words
        'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
        'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF',
        'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD',
        'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE',
        'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
        'UNTIL', 'VAR', 'VARIADIC', 'WHILE',
        # OO extensions, 16 reserved words
        'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
        'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
        'SUPER', 'TRY',
    ]
    objm2_pervasives = [
        # base language, 38 pervasives
        'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE',
        'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD',
        'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL',
        'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX',
        'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF',
        # OO extensions, 3 pervasives
        'OBJECT', 'NO', 'YES',
    ]
    gnu_reserved_words = [
        # 10 additional reserved words
        'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
        '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
    ]
    gnu_pervasives = [
        # 21 identifiers, actually from pseudo-module SYSTEM
        # but we will highlight them as if they were pervasives
        'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
        'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
        'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
        'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
    ]
    def __init__(self, **options):
        """Populate self.reserved_words/self.pervasives from the dialect
        options, then initialise the base RegexLexer."""
        self.reserved_words = set()
        self.pervasives = set()
        # ISO Modula-2
        if get_bool_opt(options, 'iso', False):
            self.reserved_words.update(self.iso_reserved_words)
            self.pervasives.update(self.iso_pervasives)
        # Objective Modula-2
        elif get_bool_opt(options, 'objm2', False):
            self.reserved_words.update(self.objm2_reserved_words)
            self.pervasives.update(self.objm2_pervasives)
        # PIM Modula-2 (DEFAULT)
        else:
            self.reserved_words.update(self.pim_reserved_words)
            self.pervasives.update(self.pim_pervasives)
        # GNU extensions
        if get_bool_opt(options, 'gm2ext', False):
            self.reserved_words.update(self.gnu_reserved_words)
            self.pervasives.update(self.gnu_pervasives)
        # initialise
        RegexLexer.__init__(self, **options)
    def get_tokens_unprocessed(self, text):
        """Re-tag generic Name tokens that are dialect reserved words
        (Keyword.Reserved) or pervasives (Keyword.Pervasive)."""
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            # check for reserved words and pervasives
            if token is Name:
                if value in self.reserved_words:
                    token = Keyword.Reserved
                elif value in self.pervasives:
                    token = Keyword.Pervasive
            # return result
            yield index, token, value
class BlitzMaxLexer(RegexLexer):
    """
    For `BlitzMax <http://blitzbasic.com>`_ source code.
    *New in Pygments 1.4.*
    """
    name = 'BlitzMax'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']
    # Reusable regex fragments used to assemble the token rules below.
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
    bmax_sktypes = r'@{1,2}|[!#$%]'
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
    bmax_name = r'[a-z_][a-z0-9_]*'
    # identifier, optionally followed by a type sigil or ':Type' annotation,
    # optionally followed by 'Ptr'
    bmax_var = r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)|([ \t]*)([:])([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)' % (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
    # variable pattern followed by an opening parenthesis = function
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            (r'\.\.\n', Text), # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number), # Binary
            # Other
            (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
                (bmax_vopwords), Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
                bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
            (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
                (bmax_name, bmax_name),
                bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
            (bmax_func, bygroups(Name.Function, Text, Keyword.Type,
                                 Operator, Text, Punctuation, Text,
                                 Keyword.Type, Name.Class, Text,
                                 Keyword.Type, Text, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
                                Text, Punctuation, Text, Keyword.Type,
                                Name.Class, Text, Keyword.Type)),
            (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
                bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (r'\b(TNullMethodException|TNullFunctionException|'
             r'TNullObjectException|TArrayBoundsException|'
             r'TRuntimeException)\b', Name.Exception),
            (r'\b(Strict|SuperStrict|Module|ModuleInfo|'
             r'End|Return|Continue|Exit|Public|Private|'
             r'Var|VarPtr|Chr|Len|Asc|SizeOf|Sgn|Abs|Min|Max|'
             r'New|Release|Delete|'
             r'Incbin|IncbinPtr|IncbinLen|'
             r'Framework|Include|Import|Extern|EndExtern|'
             r'Function|EndFunction|'
             r'Type|EndType|Extends|'
             r'Method|EndMethod|'
             r'Abstract|Final|'
             r'If|Then|Else|ElseIf|EndIf|'
             r'For|To|Next|Step|EachIn|'
             r'While|Wend|EndWhile|'
             r'Repeat|Until|Forever|'
             r'Select|Case|Default|EndSelect|'
             r'Try|Catch|EndTry|Throw|Assert|'
             r'Goto|DefData|ReadData|RestoreData)\b', Keyword.Reserved),
            # Final resolve (for variable names and such)
            (r'(%s)' % (bmax_name), Name.Variable),
        ],
        'string': [
            (r'""', String.Double), # escaped double quote inside a string
            # closing quote; the trailing 'C?' presumably matches a
            # C-string suffix -- TODO confirm against BlitzMax docs
            (r'"C?', String.Double, '#pop'),
            (r'[^"]+', String.Double),
        ],
    }
class NimrodLexer(RegexLexer):
    """
    For `Nimrod <http://nimrod-code.org/>`_ source code.
    *New in Pygments 1.5.*
    """
    name = 'Nimrod'
    aliases = ['nimrod', 'nim']
    filenames = ['*.nim', '*.nimrod']
    mimetypes = ['text/x-nimrod']
    flags = re.MULTILINE | re.IGNORECASE | re.UNICODE
    # Helper evaluated at class-definition time: Nimrod identifiers are
    # style-insensitive, so every keyword must also match forms with
    # embedded underscores (e.g. 'p_roc').
    def underscorize(words):
        """Return a regex alternation matching each word with an optional
        underscore after every character."""
        newWords = []
        new = ""
        for word in words:
            for ch in word:
                new += (ch + "_?")
            newWords.append(new)
            new = ""
        return "|".join(newWords)
    keywords = [
        'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break',
        'case', 'cast', 'const', 'continue', 'converter', 'discard',
        'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally',
        'for', 'generic', 'if', 'implies', 'in', 'yield',
        'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method',
        'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc',
        'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try',
        'tuple', 'type' , 'when', 'while', 'with', 'without', 'xor'
    ]
    keywordsPseudo = [
        'nil', 'true', 'false'
    ]
    opWords = [
        'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
        'notin', 'is', 'isnot'
    ]
    types = [
        'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
        'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
    ]
    tokens = {
        'root': [
            (r'##.*$', String.Doc),
            (r'#.*$', Comment),
            (r'\*|=|>|<|\+|-|/|@|\$|~|&|%|\!|\?|\||\\|\[|\]', Operator),
            (r'\.\.|\.|,|\[\.|\.\]|{\.|\.}|\(\.|\.\)|{|}|\(|\)|:|\^|`|;',
             Punctuation),
            # Strings
            (r'(?:[\w]+)"', String, 'rdqs'),
            (r'"""', String, 'tdqs'),
            ('"', String, 'dqs'),
            # Char
            ("'", String.Char, 'chars'),
            # Keywords
            (r'(%s)\b' % underscorize(opWords), Operator.Word),
            (r'(p_?r_?o_?c_?\s)(?![\(\[\]])', Keyword, 'funcname'),
            (r'(%s)\b' % underscorize(keywords), Keyword),
            (r'(%s)\b' % underscorize(['from', 'import', 'include']),
             Keyword.Namespace),
            (r'(v_?a_?r)\b', Keyword.Declaration),
            (r'(%s)\b' % underscorize(types), Keyword.Type),
            (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
            # Identifiers
            (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
            # Numbers
            (r'[0-9][0-9_]*(?=([eE.]|\'[fF](32|64)))',
              Number.Float, ('float-suffix', 'float-number')),
            (r'0[xX][a-fA-F0-9][a-fA-F0-9_]*', Number.Hex, 'int-suffix'),
            (r'0[bB][01][01_]*', Number, 'int-suffix'),
            (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
            (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
            # Whitespace
            (r'\s+', Text),
            (r'.+$', Error),
        ],
        'chars': [
          (r'\\([\\abcefnrtvl"\']|x[a-fA-F0-9]{2}|[0-9]{1,3})', String.Escape),
          (r"'", String.Char, '#pop'),
          (r".", String.Char)
        ],
        'strings': [
            (r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
            (r'[^\\\'"\$\n]+', String),
            # quotes, dollars and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'\$', String)
            # newlines are an error (use "nl" state)
        ],
        'dqs': [
            (r'\\([\\abcefnrtvl"\']|\n|x[a-fA-F0-9]{2}|[0-9]{1,3})',
             String.Escape),
            (r'"', String, '#pop'),
            include('strings')
        ],
        'rdqs': [
            (r'"(?!")', String, '#pop'),
            (r'""', String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""(?!")', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'funcname': [
            (r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
            (r'`.+`', Name.Function, '#pop')
        ],
        'nl': [
            (r'\n', String)
        ],
        'float-number': [
          (r'\.(?!\.)[0-9_]*', Number.Float),
          (r'[eE][+-]?[0-9][0-9_]*', Number.Float),
          (r'', Text, '#pop')
        ],
        'float-suffix': [
          (r'\'[fF](32|64)', Number.Float),
          (r'', Text, '#pop')
        ],
        'int-suffix': [
          (r'\'[iI](32|64)', Number.Integer.Long),
          (r'\'[iI](8|16)', Number.Integer),
          (r'', Text, '#pop')
        ],
    }
class FantomLexer(RegexLexer):
    """
    For Fantom source code.
    *New in Pygments 1.5.*
    """
    name = 'Fantom'
    aliases = ['fan']
    filenames = ['*.fan']
    mimetypes = ['application/x-fantom']
    # often used regexes
    def s(str):
        """Expand the $pod/$eos/$id/$type placeholders of a template
        regex; evaluated at class-definition time only."""
        return Template(str).substitute(
            dict (
                pod = r'[\"\w\.]+',
                eos = r'\n|;',
                id = r'[a-zA-Z_][a-zA-Z0-9_]*',
                # all chars which can be part of type definition. Starts with
                # either letter, or [ (maps), or | (funcs)
                type = r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]\|\->\?]*?',
            )
        )
    tokens = {
        'comments': [
            (r'(?s)/\*.*?\*/', Comment.Multiline), #Multiline
            (r'//.*?\n', Comment.Single), #Single line
            #todo: highlight references in fandocs
            (r'\*\*.*?\n', Comment.Special), #Fandoc
            (r'#.*\n', Comment.Single) #Shell-style
        ],
        'literals': [
            (r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), #Duration
            (r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number),
                                                            #Duration with dot
            (r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), #Float/Decimal
            (r'\b-?0x[0-9a-fA-F_]+', Number.Hex), #Hex
            (r'\b-?[\d_]+', Number.Integer), #Int
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), #Char
            (r'"', Punctuation, 'insideStr'), #Opening quote
            (r'`', Punctuation, 'insideUri'), #Opening accent
            (r'\b(true|false|null)\b', Keyword.Constant), #Bool & null
            (r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', #DSL
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, String, Punctuation)),
            (r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', #Type/slot literal
             bygroups(Name.Namespace, Punctuation, Name.Class,
                      Punctuation, Name.Function)),
            (r'\[,\]', Literal), # Empty list
            (s(r'($type)(\[,\])'), # Typed empty list
             bygroups(using(this, state = 'inType'), Literal)),
            (r'\[:\]', Literal), # Empty Map
            (s(r'($type)(\[:\])'),
             bygroups(using(this, state = 'inType'), Literal)),
        ],
        'insideStr': [
            (r'\\\\', String.Escape), #Escaped backslash
            (r'\\"', String.Escape), #Escaped "
            (r'\\`', String.Escape), #Escaped `
            (r'\$\w+', String.Interpol), #Subst var
            (r'\${.*?}', String.Interpol), #Subst expr
            (r'"', Punctuation, '#pop'), #Closing quot
            (r'.', String) #String content
        ],
        'insideUri': [  #TODO: remove copy/paste str/uri
            (r'\\\\', String.Escape), #Escaped backslash
            (r'\\"', String.Escape), #Escaped "
            (r'\\`', String.Escape), #Escaped `
            (r'\$\w+', String.Interpol), #Subst var
            (r'\${.*?}', String.Interpol), #Subst expr
            (r'`', Punctuation, '#pop'), #Closing tick
            (r'.', String.Backtick) #URI content
        ],
        'protectionKeywords': [
            (r'\b(public|protected|private|internal)\b', Keyword),
        ],
        'typeKeywords': [
            (r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
        ],
        'methodKeywords': [
            (r'\b(abstract|native|once|override|static|virtual|final)\b',
             Keyword),
        ],
        'fieldKeywords': [
            (r'\b(abstract|const|final|native|override|static|virtual|'
             r'readonly)\b', Keyword)
        ],
        'otherKeywords': [
            (r'\b(try|catch|throw|finally|for|if|else|while|as|is|isnot|'
             r'switch|case|default|continue|break|do|return|get|set)\b',
             Keyword),
            (r'\b(it|this|super)\b', Name.Builtin.Pseudo),
        ],
        'operators': [
            (r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
        ],
        'inType': [
            (r'[\[\]\|\->:\?]', Punctuation),
            (s(r'$id'), Name.Class),
            (r'', Text, '#pop'),
        ],
        'root': [
            include('comments'),
            include('protectionKeywords'),
            include('typeKeywords'),
            include('methodKeywords'),
            include('fieldKeywords'),
            include('literals'),
            include('otherKeywords'),
            include('operators'),
            (r'using\b', Keyword.Namespace, 'using'), # Using stmt
            (r'@\w+', Name.Decorator, 'facet'), # Symbol
            (r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class),
             'inheritance'), # Inheritance list
            ### Type var := val
            (s(r'($type)([ \t]+)($id)(\s*)(:=)'),
             bygroups(using(this, state = 'inType'), Text,
                      Name.Variable, Text, Operator)),
            ### var := val
            (s(r'($id)(\s*)(:=)'),
             bygroups(Name.Variable, Text, Operator)),
            ### .someId( or ->someId( ###
            (s(r'(\.|(?:\->))($id)(\s*)(\()'),
             bygroups(Operator, Name.Function, Text, Punctuation),
             'insideParen'),
            ### .someId or ->someId
            (s(r'(\.|(?:\->))($id)'),
             bygroups(Operator, Name.Function)),
            ### new makeXXX ( ####
            (r'(new)(\s+)(make\w*)(\s*)(\()',
             bygroups(Keyword, Text, Name.Function, Text, Punctuation),
             'insideMethodDeclArgs'),
            ### Type name ( ####
            (s(r'($type)([ \t]+)' #Return type and whitespace
               r'($id)(\s*)(\()'), #method name + open brace
             bygroups(using(this, state = 'inType'), Text,
                      Name.Function, Text, Punctuation),
             'insideMethodDeclArgs'),
            ### ArgType argName, #####
            (s(r'($type)(\s+)($id)(\s*)(,)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation)),
            #### ArgType argName) ####
            ## Covered in 'insideParen' state
            ### ArgType argName -> ArgType| ###
            (s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation, Text, using(this, state = 'inType'),
                      Punctuation)),
            ### ArgType argName| ###
            (s(r'($type)(\s+)($id)(\s*)(\|)'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation)),
            ### Type var
            (s(r'($type)([ \t]+)($id)'),
             bygroups(using(this, state='inType'), Text,
                      Name.Variable)),
            (r'\(', Punctuation, 'insideParen'),
            (r'\{', Punctuation, 'insideBrace'),
            (r'.', Text)
        ],
        'insideParen': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        'insideMethodDeclArgs': [
            (r'\)', Punctuation, '#pop'),
            (s(r'($type)(\s+)($id)(\s*)(\))'),
             bygroups(using(this, state='inType'), Text, Name.Variable,
                      Text, Punctuation), '#pop'),
            include('root'),
        ],
        'insideBrace': [
            (r'\}', Punctuation, '#pop'),
            include('root'),
        ],
        'inheritance': [
            (r'\s+', Text), #Whitespace
            (r':|,', Punctuation),
            (r'(?:(\w+)(::))?(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Class)),
            (r'{', Punctuation, '#pop')
        ],
        'using': [
            (r'[ \t]+', Text), # consume whitespaces
            (r'(\[)(\w+)(\])',
             bygroups(Punctuation, Comment.Special, Punctuation)), #ffi
            (r'(\")?([\w\.]+)(\")?',
             bygroups(Punctuation, Name.Namespace, Punctuation)), #podname
            (r'::', Punctuation, 'usingClass'),
            (r'', Text, '#pop')
        ],
        'usingClass': [
            (r'[ \t]+', Text), # consume whitespaces
            (r'(as)(\s+)(\w+)',
             bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'),
            (r'[\w\$]+', Name.Class),
            (r'', Text, '#pop:2') # jump out to root state
        ],
        'facet': [
            (r'\s+', Text),
            (r'{', Punctuation, 'facetFields'),
            (r'', Text, '#pop')
        ],
        'facetFields': [
            include('comments'),
            include('literals'),
            include('operators'),
            (r'\s+', Text),
            (r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)),
            (r'}', Punctuation, '#pop'),
            (r'.', Text)
        ],
    }
class RustLexer(RegexLexer):
    """
    Lexer for Mozilla's Rust programming language.
    *New in Pygments 1.6.*
    """
    name = 'Rust'
    filenames = ['*.rs', '*.rc']
    aliases = ['rust']
    mimetypes = ['text/x-rustsrc']
    tokens = {
        'root': [
            # Whitespace and Comments
            (r'\n', Text),
            (r'\s+', Text),
            (r'//(.*?)\n', Comment.Single),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            # Keywords
            # NOTE(review): this keyword set targets pre-1.0 Rust
            # ('alt', 'ret', 'iface', ...) -- consistent with the
            # "Pygments 1.6" vintage noted above; confirm before updating.
            (r'(alt|as|assert|be|break|check|claim|class|const'
             r'|cont|copy|crust|do|else|enum|export|fail'
             r'|false|fn|for|if|iface|impl|import|let|log'
             r'|loop|mod|mut|native|pure|resource|ret|true'
             r'|type|unsafe|use|white|note|bind|prove|unchecked'
             r'|with|syntax|u8|u16|u32|u64|i8|i16|i32|i64|uint'
             r'|int|f32|f64)\b', Keyword),
            # Character Literal
            (r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
             String.Char),
            # Binary Literal
            (r'0[Bb][01_]+', Number, 'number_lit'),
            # Octal Literal
            (r'0[0-7_]+', Number.Oct, 'number_lit'),
            # Hexadecimal Literal
            (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'),
            # Decimal Literal
            (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?'
             r'[0-9_]+|\.[0-9_]*|[eE][+\-]?[0-9_]+)?', Number, 'number_lit'),
            # String Literal
            (r'"', String, 'string'),
            # Operators and Punctuation
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?]', Operator),
            # Identifier
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            # Attributes
            (r'#\[', Comment.Preproc, 'attribute['),
            (r'#\(', Comment.Preproc, 'attribute('),
            # Macros
            (r'#[A-Za-z_][A-Za-z0-9_]*\[', Comment.Preproc, 'attribute['),
            (r'#[A-Za-z_][A-Za-z0-9_]*\(', Comment.Preproc, 'attribute('),
        ],
        'number_lit': [
            # consumes an optional width suffix (u8/i32/f64/...), then pops
            (r'(([ui](8|16|32|64)?)|(f(32|64)?))?', Keyword, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape),
            (r'[^\\"]+', String),
            (r'\\', String),
        ],
        # Bracketed/parenthesised attribute bodies; the state names encode
        # which closing delimiter pops them.
        'attribute_common': [
            (r'"', String, 'string'),
            (r'\[', Comment.Preproc, 'attribute['),
            (r'\(', Comment.Preproc, 'attribute('),
        ],
        'attribute[': [
            include('attribute_common'),
            (r'\];?', Comment.Preproc, '#pop'),
            (r'[^"\]]+', Comment.Preproc),
        ],
        'attribute(': [
            include('attribute_common'),
            (r'\);?', Comment.Preproc, '#pop'),
            (r'[^"\)]+', Comment.Preproc),
        ],
    }
class CUDALexer(CLexer):
    """
    For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
    source: the plain C lexer with CUDA-specific names re-tagged afterwards.
    *New in Pygments 1.6.*
    """
    name = 'CUDA'
    filenames = ['*.cu', '*.cuh']
    aliases = ['cuda', 'cu']
    mimetypes = ['text/x-cuda']
    # Word lists consulted by get_tokens_unprocessed() below.
    function_qualifiers = ['__device__', '__global__', '__host__',
                           '__noinline__', '__forceinline__']
    variable_qualifiers = ['__device__', '__constant__', '__shared__',
                           '__restrict__']
    vector_types = ['char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
                    'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2',
                    'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1',
                    'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
                    'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
                    'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
                    'ulonglong2', 'float1', 'float2', 'float3', 'float4',
                    'double1', 'double2', 'dim3']
    variables = ['gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize']
    functions = ['__threadfence_block', '__threadfence', '__threadfence_system',
                 '__syncthreads', '__syncthreads_count', '__syncthreads_and',
                 '__syncthreads_or']
    execution_confs = ['<<<', '>>>']
    def get_tokens_unprocessed(self, text):
        """Yield the C token stream with CUDA identifiers promoted.

        The category checks run in a fixed priority order; '__device__'
        appears in both qualifier lists, so the variable-qualifier
        classification (Keyword.Type) wins for it.
        """
        base_stream = CLexer.get_tokens_unprocessed(self, text)
        for index, token, value in base_stream:
            if token is Name:
                # variable qualifiers and vector types both map to
                # Keyword.Type, so they share one branch
                if (value in self.variable_qualifiers
                        or value in self.vector_types):
                    token = Keyword.Type
                elif value in self.variables:
                    token = Name.Builtin
                elif value in self.execution_confs:
                    token = Keyword.Pseudo
                elif value in self.function_qualifiers:
                    token = Keyword.Reserved
                elif value in self.functions:
                    token = Name.Function
            yield index, token, value
| bsd-2-clause |
souravsingh/sympy | sympy/matrices/expressions/transpose.py | 21 | 2382 | from __future__ import print_function, division
from sympy import Basic
from sympy.functions import adjoint, conjugate
from sympy.matrices.expressions.matexpr import MatrixExpr
class Transpose(MatrixExpr):
    """
    The transpose of a matrix expression.
    This is a symbolic object that simply stores its argument without
    evaluating it. To actually compute the transpose, use the ``transpose()``
    function, or the ``.T`` attribute of matrices.
    Examples
    ========
    >>> from sympy.matrices import MatrixSymbol, Transpose
    >>> from sympy.functions import transpose
    >>> A = MatrixSymbol('A', 3, 5)
    >>> B = MatrixSymbol('B', 5, 3)
    >>> Transpose(A)
    A.T
    >>> A.T == transpose(A) == Transpose(A)
    True
    >>> Transpose(A*B)
    (A*B).T
    >>> transpose(A*B)
    B.T*A.T
    """
    # Marker attribute identifying Transpose nodes.
    is_Transpose = True
    def doit(self, **hints):
        # Evaluate the transpose; with the default deep=True hint the
        # argument is evaluated first.
        arg = self.arg
        if hints.get('deep', True) and isinstance(arg, Basic):
            arg = arg.doit(**hints)
        try:
            # None from _eval_transpose means "no simplification found".
            result = arg._eval_transpose()
            return result if result is not None else Transpose(arg)
        except AttributeError:
            # arg defines no _eval_transpose(): stay unevaluated.  Note
            # this also swallows AttributeErrors raised *inside* a
            # _eval_transpose implementation.
            return Transpose(arg)
    @property
    def arg(self):
        # The wrapped matrix expression.
        return self.args[0]
    @property
    def shape(self):
        # shape of X.T is the reversed shape of X.
        return self.arg.shape[::-1]
    def _entry(self, i, j):
        # (X.T)[i, j] == X[j, i]
        return self.arg._entry(j, i)
    def _eval_adjoint(self):
        # adjoint(X.T) == conjugate(X)
        return conjugate(self.arg)
    def _eval_conjugate(self):
        # conjugate(X.T) == adjoint(X)
        return adjoint(self.arg)
    def _eval_transpose(self):
        # double transpose cancels
        return self.arg
    def _eval_trace(self):
        from .trace import Trace
        return Trace(self.arg) # Trace(X.T) => Trace(X)
    def _eval_determinant(self):
        from sympy.matrices.expressions.determinant import det
        # det(X.T) == det(X)
        return det(self.arg)
def transpose(expr):
    """Build a :class:`Transpose` of *expr* and evaluate it immediately."""
    wrapped = Transpose(expr)
    return wrapped.doit()
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_Transpose(expr, assumptions):
    """Simplify a Transpose under *assumptions*: the transpose of a
    symmetric matrix is the matrix itself.

    >>> from sympy import MatrixSymbol, Q, assuming, refine
    >>> X = MatrixSymbol('X', 2, 2)
    >>> X.T
    X.T
    >>> with assuming(Q.symmetric(X)):
    ...     print(refine(X.T))
    X
    """
    is_symmetric = ask(Q.symmetric(expr), assumptions)
    if not is_symmetric:
        return expr
    return expr.arg
# Register the refinement handler for Transpose nodes.
handlers_dict['Transpose'] = refine_Transpose
| bsd-3-clause |
ykaneko/quantum | quantum/tests/unit/test_agent_netns_cleanup.py | 7 | 10378 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from quantum.agent import netns_cleanup_util as util
from quantum.tests import base
class TestNullDelegate(base.BaseTestCase):
    """A NullDelegate should absorb arbitrary method calls, yielding None."""
    def test_getattribute(self):
        delegate = util.NullDelegate()
        result = delegate.test()
        self.assertIsNone(result)
class TestNetnsCleanup(base.BaseTestCase):
def setUp(self):
super(TestNetnsCleanup, self).setUp()
self.addCleanup(cfg.CONF.reset)
def test_kill_dhcp(self, dhcp_active=True):
conf = mock.Mock()
conf.AGENT.root_helper = 'sudo',
conf.dhcp_driver = 'driver'
method_to_patch = 'quantum.openstack.common.importutils.import_object'
with mock.patch(method_to_patch) as import_object:
driver = mock.Mock()
driver.active = dhcp_active
import_object.return_value = driver
util.kill_dhcp(conf, 'ns')
import_object.called_once_with('driver', conf, mock.ANY,
conf.AGENT.root_helper,
mock.ANY)
if dhcp_active:
driver.assert_has_calls([mock.call.disable()])
else:
self.assertFalse(driver.called)
def test_kill_dhcp_no_active(self):
self.test_kill_dhcp(False)
def test_eligible_for_deletion_ns_not_uuid(self):
ns = 'not_a_uuid'
self.assertFalse(util.eligible_for_deletion(mock.Mock(), ns))
def _test_eligible_for_deletion_helper(self, prefix, force, is_empty,
expected):
ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.return_value.namespace_is_empty.return_value = is_empty
self.assertEqual(util.eligible_for_deletion(conf, ns, force),
expected)
expected_calls = [mock.call(conf.AGENT.root_helper, ns)]
if not force:
expected_calls.append(mock.call().namespace_is_empty())
ip_wrap.assert_has_calls(expected_calls)
def test_eligible_for_deletion_empty(self):
self._test_eligible_for_deletion_helper('qrouter-', False, True, True)
def test_eligible_for_deletion_not_empty(self):
self._test_eligible_for_deletion_helper('qdhcp-', False, False, False)
def test_eligible_for_deletion_not_empty_forced(self):
self._test_eligible_for_deletion_helper('qdhcp-', True, False, True)
def test_unplug_device_regular_device(self):
conf = mock.Mock()
device = mock.Mock()
util.unplug_device(conf, device)
device.assert_has_calls([mock.call.link.delete()])
def test_unplug_device_ovs_port(self):
conf = mock.Mock()
conf.ovs_integration_bridge = 'br-int'
device = mock.Mock()
device.name = 'tap1'
device.link.delete.side_effect = RuntimeError
with mock.patch('quantum.agent.linux.ovs_lib.OVSBridge') as ovs_br_cls:
br_patch = mock.patch(
'quantum.agent.linux.ovs_lib.get_bridge_for_iface')
with br_patch as mock_get_bridge_for_iface:
mock_get_bridge_for_iface.return_value = 'br-int'
ovs_bridge = mock.Mock()
ovs_br_cls.return_value = ovs_bridge
util.unplug_device(conf, device)
mock_get_bridge_for_iface.assert_called_once_with(
conf.AGENT.root_helper, 'tap1')
ovs_br_cls.called_once_with('br-int', conf.AGENT.root_helper)
ovs_bridge.assert_has_calls(
[mock.call.delete_port(device.name)])
def test_unplug_device_cannot_determine_bridge_port(self):
conf = mock.Mock()
conf.ovs_integration_bridge = 'br-int'
device = mock.Mock()
device.name = 'tap1'
device.link.delete.side_effect = RuntimeError
with mock.patch('quantum.agent.linux.ovs_lib.OVSBridge') as ovs_br_cls:
br_patch = mock.patch(
'quantum.agent.linux.ovs_lib.get_bridge_for_iface')
with br_patch as mock_get_bridge_for_iface:
with mock.patch.object(util.LOG, 'debug') as debug:
mock_get_bridge_for_iface.return_value = None
ovs_bridge = mock.Mock()
ovs_br_cls.return_value = ovs_bridge
util.unplug_device(conf, device)
mock_get_bridge_for_iface.assert_called_once_with(
conf.AGENT.root_helper, 'tap1')
self.assertEqual(ovs_br_cls.mock_calls, [])
self.assertTrue(debug.called)
def _test_destroy_namespace_helper(self, force, num_devices):
ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
# conf.AGENT.root_helper = 'sudo'
lo_device = mock.Mock()
lo_device.name = 'lo'
devices = [lo_device]
while num_devices:
dev = mock.Mock()
dev.name = 'tap%d' % num_devices
devices.append(dev)
num_devices -= 1
with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.return_value.get_devices.return_value = devices
ip_wrap.return_value.netns.exists.return_value = True
with mock.patch.object(util, 'unplug_device') as unplug:
with mock.patch.object(util, 'kill_dhcp') as kill_dhcp:
util.destroy_namespace(conf, ns, force)
expected = [mock.call(conf.AGENT.root_helper, ns)]
if force:
expected.extend([
mock.call().netns.exists(ns),
mock.call().get_devices(exclude_loopback=True)])
self.assertTrue(kill_dhcp.called)
unplug.assert_has_calls(
[mock.call(conf, d) for d in
devices[1:]])
expected.append(mock.call().garbage_collect_namespace())
ip_wrap.assert_has_calls(expected)
def test_destroy_namespace_empty(self):
self._test_destroy_namespace_helper(False, 0)
def test_destroy_namespace_not_empty(self):
self._test_destroy_namespace_helper(False, 1)
def test_destroy_namespace_not_empty_forced(self):
self._test_destroy_namespace_helper(True, 2)
def test_destroy_namespace_exception(self):
ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
conf.AGENT.root_helper = 'sudo'
with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.side_effect = Exception()
util.destroy_namespace(conf, ns)
def test_main(self):
namespaces = ['ns1', 'ns2']
with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.get_namespaces.return_value = namespaces
with mock.patch('eventlet.sleep') as eventlet_sleep:
conf = mock.Mock()
conf.force = False
methods_to_mock = dict(
eligible_for_deletion=mock.DEFAULT,
destroy_namespace=mock.DEFAULT,
setup_conf=mock.DEFAULT)
with mock.patch.multiple(util, **methods_to_mock) as mocks:
mocks['eligible_for_deletion'].return_value = True
mocks['setup_conf'].return_value = conf
with mock.patch('quantum.common.config.setup_logging'):
util.main()
mocks['eligible_for_deletion'].assert_has_calls(
[mock.call(conf, 'ns1', False),
mock.call(conf, 'ns2', False)])
mocks['destroy_namespace'].assert_has_calls(
[mock.call(conf, 'ns1', False),
mock.call(conf, 'ns2', False)])
ip_wrap.assert_has_calls(
[mock.call.get_namespaces(conf.AGENT.root_helper)])
eventlet_sleep.assert_called_once_with(2)
    def test_main_no_candidates(self):
        # When no namespace is eligible for deletion, nothing is destroyed
        # and eventlet.sleep is never called.
        namespaces = ['ns1', 'ns2']
        with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces
            with mock.patch('eventlet.sleep') as eventlet_sleep:
                conf = mock.Mock()
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)
                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    mocks['eligible_for_deletion'].return_value = False
                    mocks['setup_conf'].return_value = conf
                    with mock.patch('quantum.common.config.setup_logging'):
                        util.main()
                        ip_wrap.assert_has_calls(
                            [mock.call.get_namespaces(conf.AGENT.root_helper)])
                        # Namespaces were vetted but never destroyed.
                        mocks['eligible_for_deletion'].assert_has_calls(
                            [mock.call(conf, 'ns1', False),
                             mock.call(conf, 'ns2', False)])
                        self.assertFalse(mocks['destroy_namespace'].called)
                        self.assertFalse(eventlet_sleep.called)
| apache-2.0 |
openvapour/ryu | ryu/contrib/ncclient/operations/__init__.py | 65 | 1469 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from errors import OperationError, TimeoutExpiredError, MissingCapabilityError
from rpc import RPC, RPCReply, RPCError, RaiseMode
# rfc4741 ops
from retrieve import Get, GetConfig, GetReply, Dispatch
from edit import EditConfig, CopyConfig, DeleteConfig, Validate, Commit, DiscardChanges
from session import CloseSession, KillSession
from lock import Lock, Unlock, LockContext
# others...
from flowmon import PoweroffMachine, RebootMachine
# Explicit public API of the package: every name re-exported from the
# submodule imports above.
__all__ = [
    'RPC',
    'RPCReply',
    'RPCError',
    'RaiseMode',
    'Get',
    'GetConfig',
    'Dispatch',
    'GetReply',
    'EditConfig',
    'CopyConfig',
    'Validate',
    'Commit',
    'DiscardChanges',
    'DeleteConfig',
    'Lock',
    'Unlock',
    'PoweroffMachine',
    'RebootMachine',
    'LockContext',
    'CloseSession',
    'KillSession',
    'OperationError',
    'TimeoutExpiredError',
    'MissingCapabilityError'
]
| apache-2.0 |
zhangpanrobot/myblog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/external/rst-directive-old.py | 118 | 2518 | # -*- coding: utf-8 -*-
"""
The Pygments reStructuredText directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~

# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False

from pygments.formatters import HtmlFormatter

# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)

# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
    # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}

from docutils import nodes
from docutils.parsers.rst import directives

from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Docutils 0.4-style directive function that highlights *content*
    with Pygments and returns a single raw HTML node.

    ``arguments[0]`` names the lexer; unknown names fall back to plain
    text.  An option flag selects a formatter from ``VARIANTS``.
    """
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # no lexer found - use the text one instead of an exception
        lexer = TextLexer()
    # Take an arbitrary option if more than one is given.
    # (Explicit if/else: ``options.keys()[0]`` is not subscriptable on
    # Python 3, and the old ``and/or`` idiom would skip a falsy formatter.)
    if options:
        formatter = VARIANTS[next(iter(options))]
    else:
        formatter = DEFAULT
    parsed = highlight(u'\n'.join(content), lexer, formatter)
    return [nodes.raw('', parsed, format='html')]
# Directive metadata consumed by Docutils 0.4's function-based directive
# API: one required argument (the lexer name), no optional arguments,
# final argument may contain whitespace.
pygments_directive.arguments = (1, 0, 1)
# The directive requires a content block (the source code to highlight).
pygments_directive.content = 1
# Every VARIANTS key becomes an allowed flag option (e.g. :linenos:).
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
| mit |
Kjwon15/earthreader-web | earthreader/web/__init__.py | 3 | 26309 | """:mod:`earthreader.web` --- Earth Reader for Web
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import datetime
import os
from flask import Flask, jsonify, render_template, request, url_for
from libearth.codecs import Rfc3339
from libearth.compat import text_type
from libearth.crawler import crawl, open_url
from libearth.parser.autodiscovery import autodiscovery, FeedUrlNotFoundError
from libearth.subscribe import Category, Subscription, SubscriptionList
from libearth.tz import now, utc
from .util import autofix_repo_url, get_hash
from .wsgi import MethodRewriteMiddleware
from .exceptions import (InvalidCategoryID, IteratorNotFound, WorkerNotRunning,
FeedNotFound, EntryNotFound)
from .worker import Worker
from .stage import stage
app = Flask(__name__)
app.wsgi_app = MethodRewriteMiddleware(app.wsgi_app)
# Application-wide defaults; PAGE_SIZE caps how many entries the
# generators below return per request.
app.config.update(
    ALLFEED='All Feeds',
    SESSION_ID=None,
    PAGE_SIZE=20,
    CRAWLER_THREAD=4,
    USE_WORKER=True,
)
# Load EARTHREADER_REPOSITORY environment variable if present.
try:
    app.config['REPOSITORY'] = os.environ['EARTHREADER_REPOSITORY']
except KeyError:
    pass
# Background crawler; started lazily in initialize() when USE_WORKER is set.
worker = Worker(app)
@app.before_first_request
def initialize():
    """One-time setup on the first request: normalize the repository URL
    and start the background crawler worker when enabled."""
    if 'REPOSITORY' in app.config:
        app.config['REPOSITORY'] = autofix_repo_url(app.config['REPOSITORY'])
    if app.config['USE_WORKER']:
        worker.start_worker()
class Cursor():
    """Navigates the staged :class:`SubscriptionList` down a
    '/'-separated *category_id*.

    Each path segment carries a one-character prefix (stripped before
    lookup).  With ``return_parent=True`` the cursor stops at the parent
    category and exposes the final segment via ``target_child``.
    Unknown attributes and iteration delegate to the current outline node.
    """

    def __init__(self, category_id, return_parent=False):
        with stage:
            self.subscriptionlist = (stage.subscriptions if stage.subscriptions
                                     else SubscriptionList())
        self.value = self.subscriptionlist
        self.path = ['/']
        self.category_id = None
        target_name = None
        self.target_child = None
        try:
            if category_id:
                self.category_id = category_id
                # Drop the one-character prefix of every segment.
                self.path = [key[1:] for key in category_id.split('/')]
                if return_parent:
                    target_name = self.path.pop(-1)
                for key in self.path:
                    self.value = self.value.categories[key]
                if target_name:
                    self.target_child = self.value.categories[target_name]
        except Exception:
            # Any lookup failure (missing key, malformed path) is reported
            # uniformly as an invalid category ID.
            raise InvalidCategoryID('The given category ID is not valid')

    def __getattr__(self, attr):
        # Delegate unknown attributes to the current outline node.
        return getattr(self.value, attr)

    def __iter__(self):
        return iter(self.value)

    def join_id(self, append):
        """Return *append* joined under this cursor's category ID using
        the '-' segment-prefix convention."""
        if self.category_id:
            return self.category_id + '/-' + append
        return '-' + append
def add_urls(data, keys, category_id, feed_id=None, entry_id=None):
    """Decorate *data* with absolute REST URLs for every name in *keys*.

    Which endpoint a key maps to depends on how specific the address is:
    feed-level keys appear once *feed_id* is given, entry-level keys once
    *entry_id* is given.  Keys without a known endpoint are ignored.
    """
    endpoint_map = {
        'entries_url': 'category_entries',
        'feeds_url': 'feeds',
        'add_feed_url': 'add_feed',
        'add_category_url': 'add_category',
        'remove_category_url': 'delete_category',
        'move_url': 'move_outline',
    }
    if feed_id is not None:
        # A concrete feed narrows the entries endpoint and enables removal.
        endpoint_map['entries_url'] = 'feed_entries'
        endpoint_map['remove_feed_url'] = 'delete_feed'
    if entry_id is not None:
        endpoint_map['entry_url'] = 'feed_entry'
        endpoint_map['read_url'] = 'read_entry'
        endpoint_map['unread_url'] = 'unread_entry'
        endpoint_map['star_url'] = 'star_entry'
        endpoint_map['unstar_url'] = 'unstar_entry'
    data.update(
        (key, url_for(endpoint_map[key],
                      category_id=category_id,
                      feed_id=feed_id,
                      entry_id=entry_id,
                      _external=True))
        for key in keys if key in endpoint_map
    )
def add_path_data(data, category_id, feed_id=''):
    """Store the client-side navigation path for the addressed node in
    *data* under the ``'path'`` key.

    The path is the category ID, extended with ``/feeds/<feed_id>`` when
    a feed is addressed; an empty category yields ``/feeds/<feed_id>``
    alone, and no feed yields just the category ID (or '').
    """
    segments = category_id if category_id else ''
    if feed_id:
        segments = segments + '/feeds/' + feed_id
    data['path'] = segments
@app.route('/', methods=['GET'])
def index():
    """Serve the single-page client application shell."""
    return render_template('index.html')
@app.route('/feeds/', defaults={'category_id': ''})
@app.route('/<path:category_id>/feeds/')
def feeds(category_id):
    """List the immediate children (feeds and sub-categories) of the
    given category, each decorated with the REST URLs a client needs."""
    cursor = Cursor(category_id)
    feeds = []
    categories = []
    for child in cursor:
        data = {'title': child.label}
        if isinstance(child, Subscription):
            url_keys = ['entries_url', 'remove_feed_url']
            add_urls(data, url_keys, cursor.category_id, child.feed_id)
            add_path_data(data, cursor.category_id, child.feed_id)
            feeds.append(data)
        elif isinstance(child, Category):
            url_keys = ['feeds_url', 'entries_url', 'add_feed_url',
                        'add_category_url', 'remove_category_url', 'move_url']
            add_urls(data, url_keys, cursor.join_id(child.label))
            add_path_data(data, cursor.join_id(child.label))
            categories.append(data)
    return jsonify(feeds=feeds, categories=categories)
@app.route('/feeds/', methods=['POST'], defaults={'category_id': ''})
@app.route('/<path:category_id>/feeds/', methods=['POST'])
def add_feed(category_id):
    """Subscribe the 'url' form field under *category_id*.

    The page at the URL is fetched, its feed link autodiscovered, the
    feed crawled once, and the subscription persisted; returns the
    refreshed child listing.  Replies 400 when the URL is unreachable or
    carries no discoverable feed.
    """
    cursor = Cursor(category_id)
    url = request.form['url']
    try:
        f = open_url(url)
        document = f.read()
        f.close()
    except Exception:
        r = jsonify(
            error='unreachable-url',
            message='Cannot connect to given url'
        )
        r.status_code = 400
        return r
    try:
        feed_links = autodiscovery(document, url)
    except FeedUrlNotFoundError:
        r = jsonify(
            error='unreachable-feed-url',
            message='Cannot find feed url'
        )
        r.status_code = 400
        return r
    # Use the first discovered feed link and crawl it once, synchronously.
    feed_url = feed_links[0].url
    feed_url, feed, hints = next(iter(crawl([feed_url], 1)))
    with stage:
        sub = cursor.subscribe(feed)
        stage.subscriptions = cursor.subscriptionlist
        stage.feeds[sub.feed_id] = feed
    return feeds(category_id)
@app.route('/', methods=['POST'], defaults={'category_id': ''})
@app.route('/<path:category_id>/', methods=['POST'])
def add_category(category_id):
    """Create a sub-category titled by the 'title' form field under
    *category_id* and return the refreshed child listing."""
    cursor = Cursor(category_id)
    title = request.form['title']
    outline = Category(label=title)
    cursor.add(outline)
    with stage:
        stage.subscriptions = cursor.subscriptionlist
    return feeds(category_id)
@app.route('/<path:category_id>/', methods=['DELETE'])
def delete_category(category_id):
    """Remove the category addressed by *category_id* and return the
    refreshed listing of its parent."""
    cursor = Cursor(category_id, True)
    cursor.remove(cursor.target_child)
    with stage:
        stage.subscriptions = cursor.subscriptionlist
    # NOTE(review): local name shadows the index() view in this scope.
    index = category_id.rfind('/')
    if index == -1:
        # Top-level category: re-list the root.
        return feeds('')
    else:
        return feeds(category_id[:index])
@app.route('/feeds/<feed_id>/', methods=['DELETE'],
           defaults={'category_id': ''})
@app.route('/<path:category_id>/feeds/<feed_id>/', methods=['DELETE'])
def delete_feed(category_id, feed_id):
    """Unsubscribe *feed_id* from *category_id*; 400 when the feed is
    not a direct child of that category."""
    cursor = Cursor(category_id)
    target = None
    # Linear scan over the category's children for the matching feed.
    for subscription in cursor:
        if isinstance(subscription, Subscription):
            if feed_id == subscription.feed_id:
                target = subscription
    if target:
        cursor.discard(target)
    else:
        r = jsonify(
            error='feed-not-found-in-path',
            message='Given feed does not exist in the path'
        )
        r.status_code = 400
        return r
    with stage:
        stage.subscriptions = cursor.subscriptionlist
    return feeds(category_id)
@app.route('/<path:category_id>/feeds/', methods=['PUT'])
@app.route('/feeds/', methods=['PUT'], defaults={'category_id': ''})
def move_outline(category_id):
    """Move a feed or category (addressed by the 'from' query argument)
    into the category addressed by the URL; 400 on circular moves."""
    source_path = request.args.get('from')
    if '/feeds/' in source_path:
        # Source is a feed: locate it among its parent's children.
        parent_category_id, feed_id = source_path.split('/feeds/')
        source = Cursor(parent_category_id)
        target = None
        for child in source:
            if child.feed_id == feed_id:
                target = child
    else:
        # Source is a category: the cursor exposes it as target_child.
        source = Cursor(source_path, True)
        target = source.target_child
    dest = Cursor(category_id)
    # Refuse to move a category into one of its own descendants.
    if isinstance(target, Category) and target.contains(dest.value):
        r = jsonify(
            error='circular-reference',
            message='Cannot move into child element.'
        )
        r.status_code = 400
        return r
    # NOTE(review): when the 'from' feed is not found, target stays None
    # and is handed to discard()/add() — confirm upstream validation.
    source.discard(target)
    with stage:
        stage.subscriptions = source.subscriptionlist
    # Rebuild the destination cursor after persisting the source change.
    dest = Cursor(category_id)
    dest.add(target)
    with stage:
        stage.subscriptions = dest.subscriptionlist
    return jsonify()
# Cache of in-flight entry iterators, keyed by url_token; values are
# (generator, time_saved) pairs.
entry_generators = {}


def tidy_generators_up():
    """Evict cached entry generators to bound memory use.

    Drops every generator saved more than 30 minutes ago and keeps at
    most the ten most recently saved of the remainder.
    """
    global entry_generators
    # Hoisted out of the loop: one consistent cutoff instead of calling
    # now() once per cached generator.
    cutoff = now() - datetime.timedelta(minutes=30)
    fresh = [(key, pair) for key, pair in entry_generators.items()
             if pair[1] >= cutoff]
    # Most recently saved first; keep only the first ten.
    fresh.sort(key=lambda item: item[1][1], reverse=True)
    entry_generators = dict(fresh[:10])
def to_bool(str_):
    """Return True iff *str_* is the word 'true', ignoring surrounding
    whitespace and letter case."""
    normalized = str_.strip().lower()
    return normalized == 'true'
def get_optional_args():
    """Pull the optional pagination/filter query arguments off the
    current request: (url_token, entry_after, read, starred); each is
    None when absent."""
    url_token = request.args.get('url_token')
    entry_after = request.args.get('entry_after')
    read = request.args.get('read')
    starred = request.args.get('starred')
    return url_token, entry_after, read, starred
def save_entry_generators(url_token, generator):
    """Cache *generator* under *url_token*, paired with the save time
    so tidy_generators_up() can expire it later."""
    entry_generators[url_token] = generator, now()
def get_entry_generator(url_token):
    """Return the cached entry generator saved under *url_token*.

    Raises IteratorNotFound when nothing is cached for the token (e.g.
    it was never saved, or has already been tidied away).
    """
    try:
        cached_pair = entry_generators[url_token]
    except KeyError:
        raise IteratorNotFound('The iterator does not exist')
    return cached_pair[0]
def remove_entry_generator(url_token):
    """Forget the cached entry generator for *url_token*, if any."""
    # dict.pop with a default replaces the membership-test-then-pop
    # two-step (also avoids the double lookup).
    entry_generators.pop(url_token, None)
def get_permalink(data):
    """Return the permalink URI of a feed or entry, falling back to its
    Atom ID when no permalink (or an empty URI) is present."""
    link = data.links.permalink
    if link and link.uri:
        return link.uri
    return data.id
def make_next_url(category_id, url_token, entry_after, read, starred,
                  feed_id=None):
    """Build the pagination URL for the next page of entries; targets
    the feed endpoint when *feed_id* is given, the category endpoint
    otherwise."""
    return url_for(
        'feed_entries' if feed_id else 'category_entries',
        category_id=category_id,
        feed_id=feed_id,
        url_token=url_token,
        entry_after=entry_after,
        read=read,
        starred=starred
    )
class FeedEntryGenerator():
    """Stateful pager over one feed's entries.

    Wraps an entry iterator *it* and keeps the next candidate entry in
    ``self.entry``.  The ``read``/``starred`` filters are the raw query
    string values ('true'/'false' or None); None means "don't filter".
    StopIteration from the wrapped iterator signals exhaustion.
    """

    def __init__(self, category_id, feed_id, feed_title, feed_permalink, it,
                 time_used, read, starred):
        self.category_id = category_id
        self.feed_id = feed_id
        self.feed_title = feed_title
        self.feed_permalink = feed_permalink
        self.it = it
        self.time_used = time_used
        # Names of entry attributes the query-string filters apply to.
        self.filters = 'read', 'starred'
        self.read = read
        self.starred = starred
        self.entry = None
        # May raise StopIteration when the feed has no matching entry.
        self.find_next_entry()

    def next(self):
        # Python 2 iterator protocol; advances the raw iterator only.
        return next(self.it)

    def __next__(self):
        # Python 3 iterator protocol; advances the raw iterator only.
        return next(self.it)

    def set_iterator(self, entry_after=None):
        """Fast-forward past *entry_after* (the last entry of the
        previous page) so paging resumes where it stopped."""
        if entry_after:
            self.skip_to_next_entry(entry_after)
            self.skip_until_filter_matched()

    def skip_to_next_entry(self, latest_entry_id):
        # Advance until the already-delivered entry is found, then one
        # more step to move past it.
        while latest_entry_id and get_hash(self.entry.id) != latest_entry_id:
            self.entry = next(self.it)
        self.entry = next(self.it)

    def skip_until_filter_matched(self):
        while self.filter_not_matched():
            self.entry = next(self.it)

    def filter_not_matched(self):
        """True when the current entry fails any active read/starred
        filter (inactive filters — None/empty — always pass)."""
        for filter in self.filters:
            arg = getattr(self, filter)
            if arg and to_bool(arg) != bool(getattr(self.entry, filter)):
                return True
        return False

    def find_next_entry(self):
        """Advance ``self.entry`` to the next entry passing the filters."""
        self.entry = next(self.it)
        while self.filter_not_matched():
            self.entry = next(self.it)

    def get_entry_data(self):
        """Serialize the current entry (plus its feed summary) to the
        JSON structure the client expects; StopIteration when spent."""
        if not self.entry:
            raise StopIteration
        entry_permalink = get_permalink(self.entry)
        entry_data = {
            'title': text_type(self.entry.title),
            'entry_id': get_hash(self.entry.id),
            'permalink': entry_permalink or None,
            'updated': Rfc3339().encode(self.entry.updated_at.astimezone(utc)),
            'read': bool(self.entry.read),
            'starred': bool(self.entry.starred)
        }
        feed_data = {
            'title': self.feed_title,
            'permalink': self.feed_permalink or None
        }
        add_urls(entry_data, ['entry_url'], self.category_id, self.feed_id,
                 get_hash(self.entry.id))
        add_urls(feed_data, ['entries_url'], self.category_id, self.feed_id)
        entry_data['feed'] = feed_data
        return entry_data

    def get_entries(self):
        """Return up to PAGE_SIZE serialized entries, marking the
        generator spent (entry = None) when the iterator runs out."""
        entries = []
        while len(entries) < app.config['PAGE_SIZE']:
            try:
                entry = self.get_entry_data()
                entries.append(entry)
                self.find_next_entry()
            except StopIteration:
                self.entry = None
                return entries
        return entries
@app.route('/feeds/<feed_id>/entries/', defaults={'category_id': ''})
@app.route('/<path:category_id>/feeds/<feed_id>/entries/')
def feed_entries(category_id, feed_id):
    """Return one page of a single feed's entries as JSON.

    Supports If-Modified-Since (304), the read/starred filters, and
    token-based pagination via a cached FeedEntryGenerator.
    404 for an unknown category or feed.
    """
    try:
        Cursor(category_id)
    except InvalidCategoryID:
        r = jsonify(
            error='category-id-invalid',
            message='Given category does not exist'
        )
        r.status_code = 404
        return r
    try:
        with stage:
            feed = stage.feeds[feed_id]
    except KeyError:
        r = jsonify(
            error='feed-not-found',
            message='Given feed does not exist'
        )
        r.status_code = 404
        return r
    if feed.__revision__:
        updated_at = feed.__revision__.updated_at
        # Honor conditional GET: compare at whole-second precision.
        if request.if_modified_since:
            if_modified_since = request.if_modified_since.replace(tzinfo=utc)
            last_modified = updated_at.replace(microsecond=0)
            if if_modified_since >= last_modified:
                return '', 304, {}  # Not Modified
    else:
        updated_at = None
    # Offer a crawl URL only while the background worker is available.
    if worker.is_running():
        crawl_url = url_for('update_entries',
                            category_id=category_id,
                            feed_id=feed_id)
    else:
        crawl_url = None
    url_token, entry_after, read, starred = get_optional_args()
    generator = None
    if url_token:
        # Resume a previous page; fall through to rebuild when expired.
        try:
            generator = get_entry_generator(url_token)
        except IteratorNotFound:
            pass
    else:
        url_token = text_type(now())
    if not generator:
        it = iter(feed.entries)
        feed_title = text_type(feed.title)
        feed_permalink = get_permalink(feed)
        try:
            generator = FeedEntryGenerator(category_id, feed_id, feed_title,
                                           feed_permalink, it, now(), read,
                                           starred)
            generator.set_iterator(entry_after)
        except StopIteration:
            # No entries match at all: empty page, no pagination token.
            return jsonify(
                title=feed_title,
                entries=[],
                next_url=None,
                read_url=url_for('read_all_entries',
                                 feed_id=feed_id,
                                 last_updated=(updated_at or
                                               now()).isoformat(),
                                 _external=True),
                crawl_url=crawl_url
            )
        save_entry_generators(url_token, generator)
        tidy_generators_up()
    entries = generator.get_entries()
    if len(entries) < app.config['PAGE_SIZE']:
        # Short page means the feed is exhausted: no next page.
        next_url = None
        if not entries:
            remove_entry_generator(url_token)
    else:
        next_url = make_next_url(
            category_id,
            url_token,
            entries[-1]['entry_id'],
            read,
            starred,
            feed_id
        )
    response = jsonify(
        title=text_type(feed.title),
        entries=entries,
        next_url=next_url,
        read_url=url_for('read_all_entries',
                         feed_id=feed_id,
                         last_updated=(updated_at or now()).isoformat(),
                         _external=True),
        crawl_url=crawl_url
    )
    if feed.__revision__:
        response.last_modified = updated_at
    return response
class CategoryEntryGenerator():
    """Merges several FeedEntryGenerators into one stream ordered by
    entry update time (newest first), for category-wide paging."""

    def __init__(self):
        self.generators = []

    def add(self, feed_entry_generator):
        if not isinstance(feed_entry_generator, FeedEntryGenerator):
            raise TypeError(
                'feed_entry_generator must be a subtype of'
                '{0.__module__}.{0.__name__}, not {1!r}'.format(
                    FeedEntryGenerator, feed_entry_generator)
            )
        self.generators.append(feed_entry_generator)

    def sort_generators(self):
        # Newest current entry first.
        self.generators = sorted(self.generators, key=lambda generator:
                                 generator.entry.updated_at, reverse=True)

    def remove_if_iterator_ends(self, generator):
        """Advance *generator*; drop it from the pool when exhausted."""
        try:
            generator.find_next_entry()
        except StopIteration:
            self.generators.remove(generator)

    def set_generators(self, entry_after, time_after):
        """Fast-forward every child generator past the last entry of the
        previous page (identified by id and timestamp), dropping children
        that run out, then sort the pool."""
        empty_generators = []
        for generator in self.generators:
            while (self.entry_newer_than_timestamp(generator, time_after) or
                   self.entry_same_as_latest_entry(generator, entry_after)):
                try:
                    generator.find_next_entry()
                except StopIteration:
                    empty_generators.append(generator)
                    break
        for generator in empty_generators:
            self.generators.remove(generator)
        self.sort_generators()

    def entry_newer_than_timestamp(self, generator, time_after):
        return (time_after and
                generator.entry.updated_at > Rfc3339().decode(time_after))

    def entry_same_as_latest_entry(self, generator, entry_id):
        return generator.entry.id == entry_id

    def find_next_generator(self):
        """Yield the generator holding the globally newest entry, then
        advance it before yielding the next one."""
        while self.generators:
            self.sort_generators()
            latest = self.generators[0]
            yield latest
            self.remove_if_iterator_ends(latest)

    def get_entries(self):
        """Return up to PAGE_SIZE serialized entries merged across all
        child generators."""
        entries = []
        generator_generator = self.find_next_generator()
        while len(entries) < app.config['PAGE_SIZE']:
            try:
                generator = next(generator_generator)
                entry_data = generator.get_entry_data()
                entries.append(entry_data)
            except StopIteration:
                return entries
            # NOTE(review): find_next_generator also advances the yielded
            # generator on resume — confirm this extra advance is intended.
            self.remove_if_iterator_ends(generator)
        return entries
@app.route('/entries/', defaults={'category_id': ''})
@app.route('/<path:category_id>/entries/')
def category_entries(category_id):
    """Return one page of entries aggregated across every feed under
    *category_id* (the whole subscription tree when empty).

    Pagination uses an ``entry_after`` token of the form
    ``<entry_id>@<updated>`` plus a cached CategoryEntryGenerator keyed
    by ``url_token``; read/starred query arguments filter entries.
    """
    cursor = Cursor(category_id)
    generator = None
    url_token, entry_after, read, starred = get_optional_args()
    if url_token:
        # Resume a previous page; rebuild below when the token expired.
        try:
            generator = get_entry_generator(url_token)
        except IteratorNotFound:
            pass
    else:
        url_token = text_type(now())
    if not generator:
        subscriptions = cursor.recursive_subscriptions
        generator = CategoryEntryGenerator()
        if entry_after:
            id_after, time_after = entry_after.split('@')
        else:
            time_after = None
            id_after = None
        for subscription in subscriptions:
            try:
                with stage:
                    feed = stage.feeds[subscription.feed_id]
            except KeyError:
                # Subscription without stored feed data: skip it.
                continue
            feed_title = text_type(feed.title)
            it = iter(feed.entries)
            feed_permalink = get_permalink(feed)
            try:
                child = FeedEntryGenerator(category_id, subscription.feed_id,
                                           feed_title, feed_permalink, it,
                                           now(), read, starred)
            except StopIteration:
                # Feed has no entry matching the filters: skip it.
                continue
            generator.add(child)
        generator.set_generators(id_after, time_after)
        save_entry_generators(url_token, generator)
        tidy_generators_up()
    entries = generator.get_entries()
    if not entries or len(entries) < app.config['PAGE_SIZE']:
        # Short page means the category is exhausted: no next page.
        next_url = None
        if not entries:
            remove_entry_generator(url_token)
    else:
        entry_after = entries[-1]['entry_id'] + '@' + entries[-1]['updated']
        next_url = make_next_url(category_id, url_token, entry_after, read,
                                 starred)
    # FIXME: use Entry.updated_at instead of from json data.
    codec = Rfc3339()
    last_updated_at = ''
    if len(entries) and not entry_after:
        last_updated_at = max(codec.decode(x['updated'])
                              for x in entries).isoformat()
    if worker.is_running():
        # BUG FIX: a trailing comma here previously made crawl_url a
        # 1-tuple instead of a URL string (cf. feed_entries above).
        crawl_url = url_for('update_entries', category_id=category_id)
    else:
        crawl_url = None
    return jsonify(
        title=category_id.split('/')[-1][1:] or app.config['ALLFEED'],
        entries=entries,
        read_url=url_for('read_all_entries', category_id=category_id,
                         last_updated=last_updated_at,
                         _external=True),
        crawl_url=crawl_url,
        next_url=next_url
    )
@app.route('/feeds/<feed_id>/entries/', defaults={'category_id': ''},
           methods=['PUT'])
@app.route('/<path:category_id>/feeds/<feed_id>/entries/', methods=['PUT'])
@app.route('/entries/', defaults={'category_id': ''}, methods=['PUT'])
@app.route('/<path:category_id>/entries/', methods=['PUT'])
def update_entries(category_id, feed_id=None):
    """Queue a background crawl of one feed (or a whole category when
    *feed_id* is None) and reply 202 Accepted; raises WorkerNotRunning
    when the crawler worker is disabled."""
    if worker.is_running():
        cursor = Cursor(category_id)
        worker.add_job(cursor, feed_id)
        r = jsonify()
        r.status_code = 202
        return r
    else:
        raise WorkerNotRunning('Worker thread is not running.')
def find_feed_and_entry(feed_id, entry_id):
    """Look up a feed and one of its entries by hashed entry ID.

    Returns ``(feed, feed_permalink, entry, entry_permalink)``.
    Raises FeedNotFound when the feed is unknown and EntryNotFound when
    no entry hashes to *entry_id*.
    """
    try:
        with stage:
            feed = stage.feeds[feed_id]
    except KeyError:
        raise FeedNotFound('The feed is not reachable')
    feed_permalink = get_permalink(feed)
    for entry in feed.entries:
        if entry_id == get_hash(entry.id):
            # Resolve the permalink only for the matching entry instead
            # of computing it for every candidate on the way.
            return feed, feed_permalink, entry, get_permalink(entry)
    raise EntryNotFound('The entry is not reachable')
@app.route('/feeds/<feed_id>/entries/<entry_id>/',
           defaults={'category_id': ''})
@app.route('/<path:category_id>/feeds/<feed_id>/entries/<entry_id>/')
def feed_entry(category_id, feed_id, entry_id):
    """Return the full JSON representation of a single entry, including
    its sanitized HTML content and the read/star toggle URLs."""
    feed, feed_permalink, entry, entry_permalink = \
        find_feed_and_entry(feed_id, entry_id)
    # Fall back to the summary when the entry carries no full content.
    content = entry.content or entry.summary
    if content is not None:
        content = content.sanitized_html
    entry_data = {
        'title': text_type(entry.title),
        'content': content,
        'updated': text_type(entry.updated_at),
        'permalink': entry_permalink or None,
    }
    feed_data = {
        'title': text_type(feed.title),
        'permalink': feed_permalink or None
    }
    add_urls(
        entry_data,
        ['read_url', 'unread_url', 'star_url', 'unstar_url'],
        category_id,
        feed_id,
        entry_id
    )
    add_urls(
        feed_data,
        ['entries_url'],
        category_id,
        feed_id
    )
    entry_data['feed'] = feed_data
    return jsonify(entry_data)
@app.route('/feeds/<feed_id>/entries/<entry_id>/read/',
           defaults={'category_id': ''}, methods=['PUT'])
@app.route('/<path:category_id>/feeds/<feed_id>/entries/<entry_id>/read/',
           methods=['PUT'])
def read_entry(category_id, feed_id, entry_id):
    """Mark a single entry as read and persist the feed."""
    feed, _, entry, _ = find_feed_and_entry(feed_id, entry_id)
    entry.read = True
    with stage:
        stage.feeds[feed_id] = feed
    return jsonify()
@app.route('/feeds/<feed_id>/entries/<entry_id>/read/',
           defaults={'category_id': ''}, methods=['DELETE'])
@app.route('/<path:category_id>/feeds/<feed_id>/entries/<entry_id>/read/',
           methods=['DELETE'])
def unread_entry(category_id, feed_id, entry_id):
    """Mark a single entry as unread and persist the feed."""
    feed, _, entry, _ = find_feed_and_entry(feed_id, entry_id)
    entry.read = False
    with stage:
        stage.feeds[feed_id] = feed
    return jsonify()
@app.route('/feeds/<feed_id>/entries/read/', methods=['PUT'])
@app.route('/<path:category_id>/feeds/<feed_id>/entries/read/',
           methods=['PUT'])
@app.route('/entries/read/', methods=['PUT'])
@app.route('/<path:category_id>/entries/read/', methods=['PUT'])
def read_all_entries(category_id='', feed_id=None):
    """Mark every entry of a feed (or of all feeds under a category) as
    read, limited to entries updated at or before the optional
    ``last_updated`` query argument.

    A missing feed is a 404 in single-feed mode but is silently skipped
    when sweeping a category (the subscription may be stale).
    """
    if feed_id:
        feed_ids = [feed_id]
    else:
        cursor = Cursor(category_id)
        feed_ids = [sub.feed_id for sub in cursor.recursive_subscriptions]
    try:
        codec = Rfc3339()
        last_updated = codec.decode(request.args.get('last_updated'))
    except Exception:
        # Missing/unparsable timestamp: mark everything as read.
        last_updated = None
    # BUG FIX: the loop variable used to shadow the feed_id parameter,
    # which made the "skip missing feeds in category mode" branch
    # unreachable — any stale subscription aborted the sweep with a 404.
    for current_feed_id in feed_ids:
        try:
            with stage:
                feed = stage.feeds[current_feed_id]
                for entry in feed.entries:
                    if not last_updated or entry.updated_at <= last_updated:
                        entry.read = True
                stage.feeds[current_feed_id] = feed
        except KeyError:
            if feed_id:
                r = jsonify(
                    error='feed-not-found',
                    message='Given feed does not exist'
                )
                r.status_code = 404
                return r
            else:
                continue
    return jsonify()
@app.route('/feeds/<feed_id>/entries/<entry_id>/star/',
           defaults={'category_id': ''}, methods=['PUT'])
@app.route('/<path:category_id>/feeds/<feed_id>/entries/<entry_id>/star/',
           methods=['PUT'])
def star_entry(category_id, feed_id, entry_id):
    """Star a single entry and persist the feed."""
    feed, _, entry, _ = find_feed_and_entry(feed_id, entry_id)
    entry.starred = True
    with stage:
        stage.feeds[feed_id] = feed
    return jsonify()
@app.route('/feeds/<feed_id>/entries/<entry_id>/star/',
           defaults={'category_id': ''}, methods=['DELETE'])
@app.route('/<path:category_id>/feeds/<feed_id>/entries/<entry_id>/star/',
           methods=['DELETE'])
def unstar_entry(category_id, feed_id, entry_id):
    """Remove the star from a single entry and persist the feed."""
    feed, _, entry, _ = find_feed_and_entry(feed_id, entry_id)
    entry.starred = False
    with stage:
        stage.feeds[feed_id] = feed
    return jsonify()
| agpl-3.0 |
renaldopringle/python_koans | python3/koans/about_regex.py | 79 | 4803 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import re
class AboutRegex(Koan):
    """
    These koans are based on Ben's book: Regular Expressions in
    10 minutes.  The ``__`` placeholders are meant to be filled in by
    the student — do not "fix" them.
    http://www.forta.com/books/0672325667/
    """

    def test_matching_literal_text(self):
        """
        Lesson 1: Matching Literal String
        """
        string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes."
        m = re.search(__, string)
        self.assertTrue(m and m.group(0) and m.group(0)== 'Felix', "I want my name")

    def test_matching_literal_text_how_many(self):
        """
        Lesson 1: How many matches?

        The default behaviour of most regular expression engines is to
        return just the first match.  In Python you have these options:

            match()    --> Determine if the RE matches at the beginning
                           of the string.
            search()   --> Scan through a string, looking for any
                           location where this RE matches.
            findall()  --> Find all substrings where the RE matches, and
                           return them as a list.
            finditer() --> Find all substrings where the RE matches, and
                           return them as an iterator.
        """
        string = "Hello, my name is Felix and this koans are based on the Ben's book: Regular Expressions in 10 minutes. Repeat My name is Felix"
        m = re.match('Felix', string) #TIP: Maybe match it's not the best option
        # I want to know how many times appears my name
        self.assertEqual(m, __)

    def test_matching_literal_text_not_case_sensitivity(self):
        """
        Lesson 1: Matching literal strings non case sensitively.

        Most regex implementations also support matches that are not
        case sensitive.  In Python you can use re.IGNORECASE; in
        Javascript you can specify the optional i flag.  In Ben's book
        you can see more languages.
        """
        string = "Hello, my name is Felix or felix and this koans is based on the Ben's book: Regular Expressions in 10 minutes."
        self.assertEqual(re.findall("felix", string), __)
        self.assertEqual(re.findall("felix", string, re.IGNORECASE), __)

    def test_matching_any_character(self):
        """
        Lesson 1: Matching any character

        ``.`` matches any character: alphabetic characters, digits, etc.
        """
        string = "pecks.xlx\n" \
                 + "orders1.xls\n" \
                 + "apec1.xls\n" \
                 + "na1.xls\n" \
                 + "na2.xls\n" \
                 + "sa1.xls"
        # TIP: remember the name of this lesson
        change_this_search_string = 'a..xlx' # <-- I want to find all uses of myArray
        self.assertEquals(len(re.findall(change_this_search_string, string)),3)

    def test_matching_set_character(self):
        """
        Lesson 2: Matching sets of characters

        A set of characters is defined using the metacharacters [ and ].
        Everything between them is part of the set, and any single one
        of the set members must match (but not all).
        """
        string = "sales.xlx\n" \
                 + "sales1.xls\n" \
                 + "orders3.xls\n" \
                 + "apac1.xls\n" \
                 + "sales2.xls\n" \
                 + "na1.xls\n" \
                 + "na2.xls\n" \
                 + "sa1.xls\n" \
                 + "ca1.xls"
        # I want to find all files for North America(na) or South America(sa), but not (ca)
        # TIP you can use the pattern .a. which matches in above test but in this case matches more than you want
        change_this_search_string = '[nsc]a[2-9].xls'
        self.assertEquals(len(re.findall(change_this_search_string, string)),3)

    def test_anything_but_matching(self):
        """
        Lesson 2: Using character set ranges

        Occasionally, you'll want a list of characters that you don't
        want to match.  Character sets can be negated using the ^
        metacharacter.
        """
        string = "sales.xlx\n" \
                 + "sales1.xls\n" \
                 + "orders3.xls\n" \
                 + "apac1.xls\n" \
                 + "sales2.xls\n" \
                 + "sales3.xls\n" \
                 + "europe2.xls\n" \
                 + "sam.xls\n" \
                 + "na1.xls\n" \
                 + "na2.xls\n" \
                 + "sa1.xls\n" \
                 + "ca1.xls"
        # I want to find the name sam
        change_this_search_string = '[^nc]am'
        self.assertEquals(re.findall(change_this_search_string, string), ['sam.xls'])
| mit |
sekikn/ambari | ambari-server/src/main/resources/scripts/takeover_config_merge.py | 3 | 24177 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import optparse
import sys
import os
import logging
import tempfile
import json
import re
import base64
import time
import xml
import xml.etree.ElementTree as ET
import StringIO
import ConfigParser
from optparse import OptionGroup
logger = logging.getLogger('AmbariTakeoverConfigMerge')

# Usage text describing the JSON mapping of file-path regexes to Ambari
# config types that this script accepts.
CONFIG_MAPPING_HELP_TEXT = """
JSON file should content map with {regex_path : <service>-log4j}
Example:
{".+/hadoop/.+/log4j.properties" : "hdfs-log4j",
".+/etc/zookeeper/conf/log4j.properties" : "zookeeper-log4j"
"c6401.ambari.apache.org/etc/hive/conf/log4j.properties" : "hive-log4j"}
"""

# Apache license header kept as a reusable string constant.
LICENSE = """
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
class Parser:
  """Abstract marker base for the file-format parsers below; concrete
  subclasses implement read_data_to_map(path)."""
  pass
class ShParser(Parser):
  """Parser for shell-script configs: the whole file becomes a single
  'content' property (shell files are not key/value structured)."""

  def read_data_to_map(self, path):
    # Slurp the file verbatim; no per-key parsing is possible.
    with open(path, 'r') as script_file:
      script_body = script_file.read()
    return {"content": script_body}, None
class YamlParser(Parser): # Used Yaml parser to read data into a map
  """Parses a YAML file into a flat {name: str(value)} map; returns
  (None, None) when the file cannot be parsed."""

  def read_data_to_map(self, path):
    # PyYAML is an optional dependency; bail out with guidance if absent.
    try:
      import yaml
    except ImportError:
      logger.error("Module PyYAML not installed. Please try to execute \"pip install pyyaml\" for installing PyYAML module.")
      sys.exit(1)
    configurations = {}
    with open(path, 'r') as file:
      try:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags on untrusted input — consider safe_load.
        for name, value in yaml.load(file).iteritems():
          if name != None:
            configurations[name] = str(value)
      except:
        # Malformed YAML is skipped rather than aborting the whole run.
        logger.error("Couldn't parse {0} file. Skipping ...".format(path))
        return None, None
    return configurations, None
class PropertiesParser(Parser):  # Uses ConfigParser to read data into a map
    def read_data_to_map(self, path):
        """
        Parse a Java-style .properties file into a {name: value} map.

        :param path: path of the .properties file to read
        :return: (configurations, None).  On parser errors the exception is
                 logged and whatever was collected so far is returned.
        """
        configurations = {}
        try:
            # Prepend a dummy section header so ConfigParser accepts the
            # section-less .properties format.
            properties_file_content = StringIO.StringIO()
            properties_file_content.write('[dummysection]\n')
            # Context manager closes the handle; the previous
            # open(path).read() leaked the file descriptor.
            with open(path) as properties_file:
                properties_file_content.write(properties_file.read())
            properties_file_content.seek(0, os.SEEK_SET)
            cp = ConfigParser.ConfigParser()
            cp.optionxform = str  # preserve key case (default lower-cases)
            cp.readfp(properties_file_content)
            for section in cp._sections:
                for name, value in cp._sections[section].iteritems():
                    if name is not None:
                        configurations[name] = value
            # Drop ConfigParser's internal bookkeeping key; pop() with a
            # default avoids a KeyError when it is absent.
            configurations.pop('__name__', None)
        except Exception:
            # Narrowed from a bare "except:"; still logs the full traceback.
            logger.exception("ConfigParser error: ")
        return configurations, None
class XmlParser(Parser):  # Uses ElementTree to read data into a map
    def read_data_to_map(self, path):
        """
        Parse a Hadoop-style *-site.xml configuration file.

        :param path: path of the XML file to read
        :return: (configurations, properties_attributes) where the attributes
                 map currently only tracks each property's "final" flag.
        """
        configurations = {}
        properties_attributes = {}
        tree = ET.parse(path)
        root = tree.getroot()
        # iter() replaces the deprecated getiterator() (removed in newer
        # ElementTree releases); the traversal semantics are identical.
        for properties in root.iter('property'):
            name = properties.find('name')
            value = properties.find('value')
            #TODO support all properties attributes
            final = properties.find('final')
            # Compare with "is not None": ElementTree elements define truth
            # by child count, so identity checks are the supported idiom.
            if name is not None:
                name_text = name.text if name.text else ""
            else:
                logger.warn("No name is found for one of the properties in {0}, ignoring it".format(path))
                continue
            if value is not None:
                value_text = value.text if value.text else ""
            else:
                logger.warn("No value is found for \"{0}\" in {1}, using empty string for it".format(name_text, path))
                value_text = ""
            if final is not None:
                final_text = final.text if final.text else ""
                properties_attributes[name_text] = final_text
            configurations[name_text] = value_text
        logger.debug("Following configurations found in {0}:\n{1}".format(path, configurations))
        return configurations, properties_attributes
class ConfigMerge:
    """
    Core engine of the takeover script.

    Two modes of operation:
      * merge: collect all supported config files under INPUT_DIR, merge
        same-named files into one config type each and emit a blueprint JSON.
      * diff: compare the config files found under LEFT_INPUT_DIR and
        RIGHT_INPUT_DIR and report file/property/attribute differences.

    NOTE(review): configuration is carried in mutable class attributes that
    main() reassigns before use; instances only hold the file maps.
    """
    # Parsed content of the -u mapping file: {path_regex: config_type_name}.
    CONTENT_UNKNOWN_FILES_MAPPING_FILE = {}
    # Defaults, overridden from command line options in main().
    LEFT_INPUT_DIR = "/tmp/left"
    RIGHT_INPUT_DIR = "/tmp/right"
    INPUT_DIR = '/etc/hadoop'
    OUTPUT_DIR = '/tmp'
    OUT_FILENAME = 'ambari_takeover_config_merge.out'
    JSON_FILENAME = 'ambari_takeover_config_merge.json'
    # Dispatch table: file extension -> parser instance.
    PARSER_BY_EXTENSIONS = {'.xml' : XmlParser(), '.yaml' : YamlParser(), '.properties' : PropertiesParser(), '.sh' : ShParser()}
    SUPPORTED_EXTENSIONS = ['.xml', '.yaml', '.properties', '.sh']
    # For these extensions the basename must contain the given substring
    # (e.g. only "*-env.sh" shell files are configurable by Ambari).
    SUPPORTED_FILENAME_ENDINGS = {".sh" : "-env"}
    # Path of the -u mapping file (None when not provided / not found).
    UNKNOWN_FILES_MAPPING_FILE = None
    # Config types whose properties are emitted as one "content" blob.
    CONFIGS_WITH_CONTENT = ['pig-properties', '-log4j']
    # Files that can only be mapped through the -u mapping file.
    NOT_MAPPED_FILES = ['log4j.properties']
    config_files_map = {}
    left_file_paths = None
    right_file_paths = None

    def __init__(self, config_files_map=None, left_file_paths=None, right_file_paths=None):
        """Store the file maps; 'merge' uses config_files_map, 'diff' the left/right maps."""
        self.config_files_map = config_files_map
        self.left_file_paths = left_file_paths
        self.right_file_paths = right_file_paths

    @staticmethod
    def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS, directory=INPUT_DIR):
        """
        Walk `directory` and group supported config files by config name.

        :return: {config_name: [(file_path, parser), ...]}

        NOTE(review): the defaults are bound at class-creation time, so the
        `directory` default stays '/etc/hadoop' even after main() reassigns
        ConfigMerge.INPUT_DIR; callers always pass directory explicitly.
        """
        filePaths = {}
        for dirName, subdirList, fileList in os.walk(directory, followlinks=True):
            for file in fileList:
                root, ext = os.path.splitext(file)
                if ext in extensions:
                    file_path = os.path.join(dirName, file)
                    # Skip e.g. *.sh files that are not "*-env.sh".
                    if ext in ConfigMerge.SUPPORTED_FILENAME_ENDINGS and not ConfigMerge.SUPPORTED_FILENAME_ENDINGS[ext] in root:
                        logger.warn("File {0} is not configurable by Ambari. Skipping...".format(file_path))
                        continue
                    config_name = None
                    # Try to resolve the config type via the -u mapping regexes.
                    if ConfigMerge.UNKNOWN_FILES_MAPPING_FILE:
                        for path_regex, name in ConfigMerge.CONTENT_UNKNOWN_FILES_MAPPING_FILE.iteritems():
                            match = re.match(path_regex, os.path.relpath(file_path, ConfigMerge.INPUT_DIR))
                            if match:
                                config_name = name
                                break
                    if not config_name:
                        # Unmapped log4j.properties cannot be attributed to a
                        # service; report and skip.
                        if file in ConfigMerge.NOT_MAPPED_FILES:
                            if ConfigMerge.UNKNOWN_FILES_MAPPING_FILE:
                                logger.error("File {0} doesn't match any regex from {1}".format(file_path, ConfigMerge.UNKNOWN_FILES_MAPPING_FILE))
                            else:
                                logger.error("Cannot map {0} to Ambari config type. Please use -u option to specify config mapping for this file. \n"
                                             "For more information use --help option for script".format(file_path))
                            continue
                        else:
                            # Fall back to the plain file name as config name.
                            config_name = file
                    if not config_name in filePaths:
                        filePaths[config_name] = []
                    filePaths[config_name].append((file_path, ConfigMerge.PARSER_BY_EXTENSIONS[ext]))
        return filePaths

    @staticmethod
    def merge_configurations(filepath_to_configurations):
        """
        Merge per-file property maps into one map.

        When the same property appears in several files the last file wins;
        the returned second map records every (value -> [files]) pair so that
        conflicts can be reported.
        :return: (merged_configurations, property_name_to_value_to_filepaths)
        """
        configuration_information_dict = {}
        property_name_to_value_to_filepaths = {}
        merged_configurations = {}
        for path, configurations in filepath_to_configurations.iteritems():
            for configuration_name, value in configurations.iteritems():
                if not configuration_name in property_name_to_value_to_filepaths:
                    property_name_to_value_to_filepaths[configuration_name] = {}
                if not value in property_name_to_value_to_filepaths[configuration_name]:
                    property_name_to_value_to_filepaths[configuration_name][value] = []
                logger.debug("Iterating over '{0}' with value '{1}' in file '{2}'".format(configuration_name, value, path))
                property_name_to_value_to_filepaths[configuration_name][value].append(path)
                merged_configurations[configuration_name] = value
        return merged_configurations, property_name_to_value_to_filepaths

    @staticmethod
    def format_for_blueprint(configurations, attributes):
        """
        Shape the merged data into the Ambari blueprint JSON structure.

        "Content" style config types (log4j, pig-properties) are flattened
        into a single newline-separated "content" string prefixed with the
        Apache LICENSE header; all other types keep a properties dict.
        """
        all_configs = []
        for configuration_type, configuration_properties in configurations.iteritems():
            is_content = False
            all_configs.append({})
            for config_with_content in ConfigMerge.CONFIGS_WITH_CONTENT:
                if config_with_content in configuration_type:
                    is_content = True
                    break
            if is_content:
                content = LICENSE
                for property_name, property_value in configuration_properties.iteritems():
                    content+=property_name + "=" + property_value + "\n"
                all_configs[-1][configuration_type] = {'properties': {"content" : content}}
            else:
                all_configs[-1][configuration_type] = {'properties' :configuration_properties}
            # Attach "final" property attributes for the matching config type.
            for configuration_type_attributes, properties_attributes in attributes.iteritems():
                if properties_attributes and configuration_type == configuration_type_attributes:
                    all_configs[-1][configuration_type].update({"properties_attributes" : {"final" : properties_attributes}})
        return {
            "configurations": all_configs,
            "host_groups": [],
            "Blueprints": {}
        }

    @staticmethod
    def format_conflicts_output(property_name_to_value_to_filepaths):
        """
        Render a human-readable conflict report.

        Only properties that were seen with more than one distinct value are
        reported; the first (value, file) pair is the reference line, the
        remaining pairs are listed under "Has conflicts with".
        """
        output = ""
        for property_name, value_to_filepaths in property_name_to_value_to_filepaths.iteritems():
            if len(value_to_filepaths) == 1:
                continue
            # first_item flags whether the header line was already emitted.
            first_item = False
            for value, filepaths in value_to_filepaths.iteritems():
                if not first_item:
                    first_item = True
                    output += "\n\n=== {0} | {1} | {2} |\nHas conflicts with:\n\n".format(property_name,filepaths[0], value)
                    continue
                for filepath in filepaths:
                    output += "| {0} | {1} | {2} |\n".format(property_name, filepath, value)
        return output

    def perform_merge(self):
        """
        Run the 'merge' action.

        Parses every grouped file, merges configurations and attributes per
        config type, writes per-type conflict reports plus the final
        blueprint.json.  Returns 1 when conflicts were found, else 0.
        """
        result_configurations = {}
        result_property_attributes = {}
        has_conflicts = False
        for filename, paths_and_parsers in self.config_files_map.iteritems():
            filepath_to_configurations = {}
            filepath_to_property_attributes = {}
            # Config type is the file name without extension.
            configuration_type = os.path.splitext(filename)[0]
            for path_and_parser in paths_and_parsers:
                path, parser = path_and_parser
                logger.debug("Read data from {0}".format(path))
                parsed_configurations_from_path, parsed_properties_attributes = parser.read_data_to_map(path)
                if parsed_configurations_from_path != None:
                    filepath_to_configurations[path] = parsed_configurations_from_path
                if parsed_properties_attributes != None:
                    filepath_to_property_attributes[path] = parsed_properties_attributes
            #configs merge
            merged_configurations, property_name_to_value_to_filepaths = ConfigMerge.merge_configurations(
                filepath_to_configurations)
            #properties attributes merge
            merged_attributes, property_name_to_attribute_to_filepaths = ConfigMerge.merge_configurations(
                filepath_to_property_attributes)
            configuration_conflicts_output = ConfigMerge.format_conflicts_output(property_name_to_value_to_filepaths)
            attribute_conflicts_output = ConfigMerge.format_conflicts_output(property_name_to_attribute_to_filepaths)
            if configuration_conflicts_output:
                has_conflicts = True
                conflict_filename = os.path.join(self.OUTPUT_DIR, configuration_type + "-conflicts.txt")
                logger.warn(
                    "You have configurations conflicts for {0}. Please check {1}".format(configuration_type, conflict_filename))
                with open(conflict_filename, "w") as fp:
                    fp.write(configuration_conflicts_output)
            if attribute_conflicts_output:
                has_conflicts = True
                conflict_filename = os.path.join(self.OUTPUT_DIR, configuration_type + "-attributes-conflicts.txt")
                logger.warn(
                    "You have property attribute conflicts for {0}. Please check {1}".format(configuration_type, conflict_filename))
                with open(conflict_filename, "w") as fp:
                    fp.write(attribute_conflicts_output)
            result_configurations[configuration_type] = merged_configurations
            result_property_attributes[configuration_type] = merged_attributes
        result_json_file = os.path.join(self.OUTPUT_DIR, "blueprint.json")
        logger.info("Using '{0}' file as output for blueprint template".format(result_json_file))
        with open(result_json_file, 'w') as outfile:
            outfile.write(json.dumps(ConfigMerge.format_for_blueprint(result_configurations, result_property_attributes), sort_keys=True, indent=4,
                                     separators=(',', ': ')))
        if has_conflicts:
            logger.info("Script finished with configurations conflicts, please resolve them before using the blueprint")
            return 1
        else:
            logger.info("Script successfully finished")
            return 0

    def perform_diff(self):
        """
        Run the 'diff' action.

        Compares config files present on both sides plus files missing from
        either side, and writes a single file-diff.txt report.  Always
        returns 0.
        """
        configurations_conflicts = {}
        attributes_conflicts = {}
        file_conflicts = []
        matches_configs = []
        # Config names that exist in both directories.
        for right_configs_names in self.right_file_paths:
            for left_configs_names in self.left_file_paths:
                if right_configs_names == left_configs_names:
                    matches_configs.append(right_configs_names)
        for match_config in matches_configs:
            configurations_conflicts[match_config], attributes_conflicts[match_config] = ConfigMerge.configuration_diff(self.left_file_paths[match_config], self.right_file_paths[match_config])
        # Files present on only one side.
        file_conflicts = ConfigMerge.get_missing_files(self.right_file_paths, matches_configs, ConfigMerge.LEFT_INPUT_DIR) + \
                         ConfigMerge.get_missing_files(self.left_file_paths, matches_configs, ConfigMerge.RIGHT_INPUT_DIR)
        configuration_diff_output = None
        configuration_diff_output = ConfigMerge.format_diff_output(file_conflicts, configurations_conflicts, attributes_conflicts)
        if configuration_diff_output and configuration_diff_output != "":
            conflict_filename = os.path.join(ConfigMerge.OUTPUT_DIR, "file-diff.txt")
            logger.warn(
                "You have file diff conflicts. Please check {0}".format(conflict_filename))
            with open(conflict_filename, "w") as fp:
                fp.write(configuration_diff_output)
        logger.info("Script successfully finished")
        return 0

    @staticmethod
    def format_diff_output(file_conflicts, configurations_conflicts, attributes_conflicts):
        """Render the three diff-conflict sections into one report string."""
        output = ""
        if file_conflicts:
            output += "======= File diff conflicts ====== \n\n"
            for file_conflict in file_conflicts:
                output+=str(file_conflict)+"\n"
        if configurations_conflicts:
            output += "\n\n======= Property diff conflicts ====== "
            for config_name, property in configurations_conflicts.iteritems():
                if property:
                    output+= "\n\n||| " + config_name + " |||\n"
                    output+= "\n".join(str(p) for p in property)
        if attributes_conflicts:
            output += "\n\n======= Final attribute diff conflicts ====== "
            for config_name, property_with_attribute in attributes_conflicts.iteritems():
                if property_with_attribute:
                    output+= "\n\n||| " + config_name + " |||\n"
                    output+= "\n".join(str(p) for p in property_with_attribute)
        return output

    @staticmethod
    def configuration_diff(left, right):
        """
        Diff one config type between the two directories.

        Only the first (path, parser) entry of each side is compared.
        :return: (properties_conflicts, attributes_conflicts) lists.
        """
        properties_conflicts = []
        attributes_conflicts = []
        left_path, left_parser = left[0]
        left_configurations, left_attributes = left_parser.read_data_to_map(left_path)
        right_path, right_parser = right[0]
        right_configurations, right_attributes = right_parser.read_data_to_map(right_path)
        matches_configs = []
        matches_attributes = []
        matches_configs, properties_conflicts = ConfigMerge.get_conflicts_and_matches(left_configurations, right_configurations, left_path, right_path)
        # Properties present on only one side.
        properties_conflicts += ConfigMerge.get_missing_properties(left_configurations, matches_configs, right_path) + \
                                ConfigMerge.get_missing_properties(right_configurations, matches_configs, left_path)
        if left_attributes and right_attributes:
            matches_attributes, attributes_conflicts = ConfigMerge.get_conflicts_and_matches(left_attributes, right_attributes, left_path, right_path)
            attributes_conflicts += ConfigMerge.get_missing_attributes(left_attributes, matches_attributes, right_path) + \
                                    ConfigMerge.get_missing_attributes(right_attributes, matches_attributes, left_path)
        elif left_attributes:
            attributes_conflicts = ConfigMerge.get_missing_attributes(left_attributes, matches_attributes, right_path)
        elif right_attributes:
            attributes_conflicts = ConfigMerge.get_missing_attributes(right_attributes, matches_attributes, left_path)
        return properties_conflicts, attributes_conflicts

    @staticmethod
    def get_conflicts_and_matches(left_items, right_items, left_path, right_path):
        """
        Find keys present in both maps; report those whose values differ.

        :return: (matches, conflicts) where each conflict is
                 {key: [{left_path: left_value}, {right_path: right_value}]}.
        """
        matches = []
        conflicts = []
        for left_key, left_value in left_items.iteritems():
            for right_key, right_value in right_items.iteritems():
                if left_key == right_key:
                    matches.append(right_key)
                    if left_value != right_value:
                        conflicts.append({right_key : [{left_path : left_value}, {right_path :right_value}]})
        return matches, conflicts

    @staticmethod
    def get_missing_attributes(attributes, matches, file_path):
        """Report "final" attributes that exist on one side but not in `matches`."""
        conflicts = []
        for key, value in attributes.iteritems():
            if not key in matches:
                conflicts.append({key : "Final attribute is missing in {0} file".format(file_path)})
        return conflicts

    @staticmethod
    def get_missing_properties(configurations, matches, file_path):
        """Report properties that exist on one side but not in `matches`."""
        conflicts = []
        for key, value in configurations.iteritems():
            if not key in matches:
                conflicts.append({key : "Property is missing in {0} file".format(file_path)})
        return conflicts

    @staticmethod
    def get_missing_files(config_file_paths, matches, input_dir):
        """Report config files that have no counterpart in `input_dir`."""
        conflicts = []
        for file_name in config_file_paths:
            if file_name not in matches:
                conflicts.append({file_name : "Configurations file is missing for {0} directory".format(input_dir)})
        return conflicts
def main():
    """
    Command line entry point.

    Parses options, configures file + stdout logging, loads the optional
    unknown-files mapping and dispatches to the 'merge' or 'diff' action.

    :return: process exit code (0 success, 1 merge conflicts, -1 bad input).
    """
    tempDir = tempfile.gettempdir()
    outputDir = os.path.join(tempDir)

    parser = optparse.OptionParser(usage="usage: %prog [options]")
    parser.set_description('This python program is an Ambari thin client and '
                           'supports Ambari cluster takeover by generating a '
                           'configuration json that can be used with a '
                           'blueprint.\n\nIt reads actual hadoop configs '
                           'from a target directory and produces an out file '
                           'with problems found that need to be addressed and '
                           'the json file which can be used to create the '
                           'blueprint.\n\nThis script only works with *.xml *.yaml '
                           'and *.properties extensions of files.')

    parser.add_option("-a", "--action", dest="action", default = "merge",
                      help="Script action. (merge/diff) [default: merge]")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      default=False, help="output verbosity.")
    parser.add_option("-o", "--outputdir", dest="outputDir", default=outputDir,
                      metavar="FILE", help="Output directory. [default: /tmp]")
    parser.add_option("-u", '--unknown-files-mapping-file',dest="unknown_files_mapping_file",
                      metavar="FILE", help=CONFIG_MAPPING_HELP_TEXT, default="takeover_files_mapping.json")

    merge_options_group = OptionGroup(parser, "Required options for action 'merge'")
    merge_options_group.add_option("-i", "--inputdir", dest="inputDir", help="Input directory.")
    parser.add_option_group(merge_options_group)

    diff_options_group = OptionGroup(parser, "Required options for action 'diff'")
    diff_options_group.add_option("-l", "--leftInputDir", dest="leftInputDir", help="Left input directory.")
    diff_options_group.add_option("-r", "--rightInputDir", dest="rightInputDir", help="Right input directory.")
    parser.add_option_group(diff_options_group)

    (options, args) = parser.parse_args()

    # set verbose
    if options.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    ConfigMerge.OUTPUT_DIR = options.outputDir
    if not os.path.exists(ConfigMerge.OUTPUT_DIR):
        os.makedirs(ConfigMerge.OUTPUT_DIR)

    # Fixed variable name typo (was "logegr_file_name").
    logger_file_name = os.path.join(ConfigMerge.OUTPUT_DIR, "takeover_config_merge.log")

    file_handler = logging.FileHandler(logger_file_name, mode="w")
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    stdout_handler.setFormatter(formatter)
    logger.addHandler(stdout_handler)

    #unknown file mapping
    if options.unknown_files_mapping_file and os.path.exists(options.unknown_files_mapping_file):
        ConfigMerge.UNKNOWN_FILES_MAPPING_FILE = options.unknown_files_mapping_file
        with open(options.unknown_files_mapping_file) as f:
            ConfigMerge.CONTENT_UNKNOWN_FILES_MAPPING_FILE = json.load(f)
    else:
        logger.warning("Config mapping file was not found at {0}. "
                       "Please provide it at the given path or provide a different path to it using -u option.".format(options.unknown_files_mapping_file))

    if options.action == "merge" :
        # Validate the input directory up front; previously a missing/bad -i
        # option crashed later inside os.walk(None).
        if not options.inputDir or not os.path.isdir(options.inputDir):
            logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.inputDir))
            return -1
        ConfigMerge.INPUT_DIR = options.inputDir
        file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.INPUT_DIR)
        logger.info("Writing logs into '{0}' file".format(logger_file_name))
        logger.debug("Following configuration files found:\n{0}".format(file_paths.items()))
        config_merge = ConfigMerge(config_files_map=file_paths)
        return config_merge.perform_merge()
    elif options.action == "diff" :
        if options.leftInputDir and os.path.isdir(options.leftInputDir):
            ConfigMerge.LEFT_INPUT_DIR = options.leftInputDir
        else:
            logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.leftInputDir))
            return -1

        if options.rightInputDir and os.path.isdir(options.rightInputDir):
            ConfigMerge.RIGHT_INPUT_DIR = options.rightInputDir
        else:
            logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.rightInputDir))
            return -1

        logger.info("Writing logs into '{0}' file".format(logger_file_name))
        left_file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.LEFT_INPUT_DIR)
        logger.debug("Following configuration files found:\n{0} for left directory".format(left_file_paths.items()))
        right_file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.RIGHT_INPUT_DIR)
        logger.debug("Following configuration files found:\n{0} for right directory".format(right_file_paths.items()))
        config_merge = ConfigMerge(left_file_paths=left_file_paths , right_file_paths=right_file_paths)
        return config_merge.perform_diff()
    else:
        # Grammar fix in the user-facing error message (was "doesn't supports
        # by script").
        logger.error("Action \"{0}\" is not supported by the script. Use option \"-h\" for details".format(options.action))
        return -1
if __name__ == "__main__":
    try:
        sys.exit(main())
    except (KeyboardInterrupt, EOFError):
        # Exit with a non-zero status on Ctrl+C / Ctrl+D instead of a traceback.
        print("\nAborting ... Keyboard Interrupt.")
        sys.exit(1)
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/managed_placement_view.py | 1 | 1296 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto module descriptor: registers every message type listed in `manifest`
# under the given proto package and marshal domain.
__protobuf__ = proto.module(
    package='google.ads.googleads.v7.resources',
    marshal='google.ads.googleads.v7',
    manifest={
        'ManagedPlacementView',
    },
)
class ManagedPlacementView(proto.Message):
    r"""A managed placement view.
    Attributes:
        resource_name (str):
            Output only. The resource name of the Managed Placement
            view. Managed placement view resource names have the form:
            ``customers/{customer_id}/managedPlacementViews/{ad_group_id}~{criterion_id}``
    """

    # Proto wire field 1: fully-qualified resource name of this view.
    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )


# Public API of this module: every message listed in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
TamiaLab/PySkCode | skcode/utility/cosmetics.py | 1 | 1689 | """
SkCode cosmetics replacement utility code.
"""
import re
# Default cosmetics replacement map: ordered (compiled_regex, replacement)
# pairs applied by do_cosmetics_replacement().
DEFAULT_COSMETICS_MAP = (
    (re.compile(r'(\s+)---(\s+)'), r'\1—\2'),
    (re.compile(r'(\s+)--(\s+)'), r'\1—\2'),
    (re.compile(r'\.\.\.'), r'…'),
    (re.compile(r'\(tm\)'), r'™'),
)

# Document attribute name under which the cosmetics map is stored.
COSMETICS_MAP_ATTR_NAME = 'COSMETICS_MAP'


def setup_cosmetics_replacement(document_tree, cosmetics_map=DEFAULT_COSMETICS_MAP):
    """
    Setup the document for cosmetics replacement.
    :param document_tree: The root document tree instance to be setup.
    :param cosmetics_map: A tuple of tuples with two values
        ``(compiled_regex, replacement_str)``; falsy values disable replacement.
    """
    assert document_tree, "Document tree is mandatory."
    assert document_tree.is_root, "Document tree must be a root tree node instance."

    # Attach the map to the document; an empty tuple means "no replacements".
    document_tree.attrs[COSMETICS_MAP_ATTR_NAME] = cosmetics_map if cosmetics_map else ()
def do_cosmetics_replacement(root_tree_node, input_text):
    """
    Apply every configured cosmetics replacement to the given text.
    :param root_tree_node: The root tree node instance (holds the cosmetics map).
    :param input_text: The input text to be processed.
    :return: The input text with all cosmetics replacements applied.
    """
    # Nothing to do for empty input.
    if not input_text:
        return ''

    # Fetch the map stored by setup_cosmetics_replacement() (empty when unset).
    replacements = root_tree_node.attrs.get(COSMETICS_MAP_ATTR_NAME, ())

    # Apply each (regex, replacement) pair in declaration order.
    result = input_text
    for pattern, substitute in replacements:
        result = pattern.sub(substitute, result)
    return result
| agpl-3.0 |
steveb/heat | heat/tests/test_support.py | 13 | 3769 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.engine import support
from heat.tests import common
class SupportStatusTest(common.HeatTestCase):
    """Unit tests for the support.SupportStatus value object."""

    def test_valid_status(self):
        """Every declared status constant round-trips through to_dict()."""
        for sstatus in support.SUPPORT_STATUSES:
            previous = support.SupportStatus(version='test_version')
            status = support.SupportStatus(
                status=sstatus,
                message='test_message',
                version='test_version',
                previous_status=previous,
            )
            self.assertEqual(sstatus, status.status)
            self.assertEqual('test_message', status.message)
            self.assertEqual('test_version', status.version)
            self.assertEqual(previous, status.previous_status)
            # previous_status is serialized recursively, one level deep here.
            self.assertEqual({
                'status': sstatus,
                'message': 'test_message',
                'version': 'test_version',
                'previous_status': {'status': 'SUPPORTED',
                                    'message': None,
                                    'version': 'test_version',
                                    'previous_status': None},
            }, status.to_dict())

    def test_invalid_status(self):
        """An unknown status string falls back to UNKNOWN and drops details."""
        status = support.SupportStatus(
            status='RANDOM',
            message='test_message',
            version='test_version',
            previous_status=support.SupportStatus()
        )
        self.assertEqual(support.UNKNOWN, status.status)
        self.assertEqual('Specified status is invalid, defaulting to UNKNOWN',
                         status.message)
        self.assertIsNone(status.version)
        self.assertIsNone(status.previous_status)
        self.assertEqual({
            'status': 'UNKNOWN',
            'message': 'Specified status is invalid, defaulting to UNKNOWN',
            'version': None,
            'previous_status': None,
        }, status.to_dict())

    def test_previous_status(self):
        """previous_status chains are preserved and serialized recursively."""
        sstatus = support.SupportStatus(
            status=support.DEPRECATED,
            version='5.0.0',
            previous_status=support.SupportStatus(
                status=support.SUPPORTED,
                version='2015.1'
            )
        )
        self.assertEqual(support.DEPRECATED, sstatus.status)
        self.assertEqual('5.0.0', sstatus.version)
        self.assertEqual(support.SUPPORTED, sstatus.previous_status.status)
        self.assertEqual('2015.1', sstatus.previous_status.version)
        self.assertEqual({'status': 'DEPRECATED',
                          'version': '5.0.0',
                          'message': None,
                          'previous_status': {'status': 'SUPPORTED',
                                              'version': '2015.1',
                                              'message': None,
                                              'previous_status': None}},
                         sstatus.to_dict())

    def test_invalid_previous_status(self):
        """Passing a non-SupportStatus previous_status raises ValueError."""
        ex = self.assertRaises(ValueError,
                               support.SupportStatus, previous_status='YARRR')
        self.assertEqual('previous_status must be SupportStatus '
                         'instead of %s' % str, six.text_type(ex))
| apache-2.0 |
Connexions/openstax-cms | oxauth/management/commands/prune_accounts.py | 2 | 2416 | from wagtail.core.models import Page
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
class Command(BaseCommand):
    help = "remove all users that do not have content created or admin access"

    def handle(self, *args, **options):
        """
        Delete every user who is neither in an administrative group, a
        superuser, nor the owner of any page.
        """
        pages = Page.objects.all()
        keep_users = []
        orphaned_books = []

        # we want to save all superuser accounts and content creator accounts
        save_groups = ["Blogger", "Content Development Intern", "Content Managers", "Content Provider", "Customer Service", "Editors"]
        save_users_in_groups = User.objects.filter(groups__name__in=save_groups)
        for user in save_users_in_groups:
            keep_users.append(user.pk)  # keeping all users in the above groups
        print("Keeping {} users in administrative roles.".format(save_users_in_groups.count()))

        # Fixed variable name typo (was "supserusers").
        superusers = User.objects.filter(is_superuser=True)
        for user in superusers:
            keep_users.append(user.pk)  # keeping all superusers
        print("Keeping {} users with the superuser delegation.".format(superusers.count()))

        owner_count = 0
        for page in pages:
            try:
                if page.owner:
                    keep_users.append(page.owner.pk)  # keeping all page owners
                    owner_count = owner_count + 1
            except User.DoesNotExist:
                # TODO: Should we do something about this here - assign to existing user?
                orphaned_books.append(page)
                #print("Owner for {} for page does not exist on this system.".format(page.title))
        print("Keeping {} users that have authored content in the CMS.".format(owner_count))
        print("Found {} books that have no owner (owner deleted previously).".format(len(orphaned_books)))

        keep_users = set(keep_users)  # convert list to set so we have a unique list of users to keep
        print("Converging unique users. {} users will be kept.".format(len(keep_users)))

        users_all = User.objects.all()
        print("{} - Total users on the system".format(users_all.count()))

        # keep_users is already a set; the previous extra set() wrap was redundant.
        users_slated_for_removal = User.objects.all().exclude(pk__in=keep_users)
        print("{} - Total users slated for removal".format(users_slated_for_removal.count()))
        users_slated_for_removal.delete()
        print("Users have been successfully pruned!")
| agpl-3.0 |
gregbanks/suds | suds/bindings/binding.py | 1 | 19048 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for (WS) SOAP bindings.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace
from suds.sax.parser import Parser
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sudsobject import Factory, Object
from suds.mx import Content
from suds.mx.literal import Literal as MxLiteral
from suds.umx.basic import Basic as UmxBasic
from suds.umx.typed import Typed as UmxTyped
from suds.bindings.multiref import MultiRef
from suds.xsd.query import TypeQuery, ElementQuery
from suds.xsd.sxbasic import Element as SchemaElement
from suds.options import Options
from suds.plugin import PluginContainer
from copy import deepcopy
# Module-level logger for this binding module.
log = getLogger(__name__)
# SOAP 1.1 envelope namespace as a (prefix, URI) pair, used for element lookups.
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
class Binding:
"""
The soap binding class used to process outgoing and imcoming
soap messages per the WSDL port binding.
@cvar replyfilter: The reply filter function.
@type replyfilter: (lambda s,r: r)
@ivar wsdl: The wsdl.
@type wsdl: L{suds.wsdl.Definitions}
@ivar schema: The collective schema contained within the wsdl.
@type schema: L{xsd.schema.Schema}
@ivar options: A dictionary options.
@type options: L{Options}
"""
replyfilter = (lambda s,r: r)
    def __init__(self, wsdl):
        """
        @param wsdl: A wsdl.
        @type wsdl: L{wsdl.Definitions}
        """
        self.wsdl = wsdl
        # Resolves multiref (href/id) encoded content in soap replies.
        self.multiref = MultiRef()
    def schema(self):
        """
        Get the collective schema contained within the wsdl.
        @return: The wsdl's schema.
        @rtype: L{xsd.schema.Schema}
        """
        return self.wsdl.schema
    def options(self):
        """
        Get the options attached to the wsdl.
        @return: The current options.
        @rtype: L{Options}
        """
        return self.wsdl.options
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxTyped(self.schema())
else:
return UmxBasic()
    def marshaller(self):
        """
        Get the appropriate XML encoder.
        @return: An L{MxLiteral} marshaller.
        @rtype: L{MxLiteral}
        """
        # xstq: the xsi:type=qname flag taken from the options.
        return MxLiteral(self.schema(), self.options().xstq)
def param_defs(self, method):
"""
Get parameter definitions.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A servic emethod.
@type method: I{service.Method}
@return: A collection of parameter definitions
@rtype: [I{pdef},..]
"""
raise Exception, 'not implemented'
    def get_message(self, method, args, kwargs):
        """
        Get the soap message for the specified method, args and soapheaders.
        This is the entry point for creating the outbound soap message.
        @param method: The method being invoked.
        @type method: I{service.Method}
        @param args: A list of args for the method invoked.
        @type args: list
        @param kwargs: Named (keyword) args for the method invoked.
        @type kwargs: dict
        @return: The soap envelope.
        @rtype: L{Document}
        """
        # Build header content, then body content, then wrap both in an envelope.
        content = self.headercontent(method)
        header = self.header(content)
        content = self.bodycontent(method, args, kwargs)
        body = self.body(content)
        env = self.envelope(header, body)
        # Namespace prefix handling is driven by the "prefixes" option.
        if self.options().prefixes:
            body.normalizePrefixes()
            env.promotePrefixes()
        else:
            env.refitPrefixes()
        return Document(env)
    def get_reply(self, method, reply):
        """
        Process the I{reply} for the specified I{method} by sax parsing the I{reply}
        and then unmarshalling into python object(s).
        @param method: The name of the invoked method.
        @type method: str
        @param reply: The reply XML received after invoking the specified method.
        @type reply: str
        @return: The unmarshalled reply.  The returned value is an L{Object} or a
            I{list} depending on whether the service returns a single object or a
            collection.
        @rtype: tuple ( L{Element}, L{Object} )
        """
        # Give the (optional) reply filter first crack at the raw XML.
        reply = self.replyfilter(reply)
        sax = Parser()
        replyroot = sax.parse(string=reply)
        # Notify plugins that the reply document has been parsed.
        plugins = PluginContainer(self.options().plugins)
        plugins.message.parsed(reply=replyroot)
        soapenv = replyroot.getChild('Envelope')
        soapenv.promotePrefixes()
        soapbody = soapenv.getChild('Body')
        # Raises WebFault (when the faults option is enabled) on soap faults.
        self.detect_fault(soapbody)
        # Resolve multiref (href/id) encoded content before unmarshalling.
        soapbody = self.multiref.process(soapbody)
        nodes = self.replycontent(method, soapbody)
        rtypes = self.returned_types(method)
        # Multiple return types: build a composite reply object.
        if len(rtypes) > 1:
            result = self.replycomposite(rtypes, nodes)
            return (replyroot, result)
        if len(rtypes) == 1:
            # Unbounded (maxOccurs > 1) return type: always return a list.
            if rtypes[0].unbounded():
                result = self.replylist(rtypes[0], nodes)
                return (replyroot, result)
            if len(nodes):
                unmarshaller = self.unmarshaller()
                resolved = rtypes[0].resolve(nobuiltin=True)
                result = unmarshaller.process(nodes[0], resolved)
                return (replyroot, result)
        # No return types or no reply content: nothing to unmarshal.
        return (replyroot, None)
def detect_fault(self, body):
    """
    Detect I{hidden} soapenv:Fault element in the soap body.
    @param body: The soap envelope body.
    @type body: L{Element}
    @raise WebFault: When found and the I{faults} option is enabled.
    """
    fault = body.getChild('Fault', envns)
    if fault is None:
        return
    # untyped unmarshalling of the fault content
    unmarshaller = self.unmarshaller(False)
    p = unmarshaller.process(fault)
    if self.options().faults:
        raise WebFault(p, fault)
    # NOTE(review): returning self from a detector is unusual; callers
    # appear to ignore the return value, so the early ``return`` above
    # (implicitly None) and this differ harmlessly.
    return self
def replylist(self, rt, nodes):
    """
    Construct a I{list} reply.  This method is called when it has been
    detected that the reply is a list.
    @param rt: The return I{type}.
    @type rt: L{suds.xsd.sxbase.SchemaObject}
    @param nodes: A collection of XML nodes.
    @type nodes: [L{Element},...]
    @return: A list of I{unmarshalled} objects.
    @rtype: [L{Object},...]
    """
    resolved = rt.resolve(nobuiltin=True)
    unmarshaller = self.unmarshaller()
    # unmarshal every node against the same resolved type
    return [unmarshaller.process(node, resolved) for node in nodes]
def replycomposite(self, rtypes, nodes):
    """
    Construct a I{composite} reply. This method is called when it has been
    detected that the reply has multiple root nodes.
    @param rtypes: A list of known return I{types}.
    @type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
    @param nodes: A collection of XML nodes.
    @type nodes: [L{Element},...]
    @return: The I{unmarshalled} composite object.
    @rtype: L{Object},...
    """
    # index the return types by part name
    dictionary = {}
    for rt in rtypes:
        dictionary[rt.name] = rt
    unmarshaller = self.unmarshaller()
    composite = Factory.object('reply')
    for node in nodes:
        tag = node.name
        rt = dictionary.get(tag, None)
        if rt is None:
            # unknown tag: nodes carrying an id are multiref targets and
            # are skipped; anything else is a message-part mismatch
            if node.get('id') is None:
                raise Exception('<%s/> not mapped to message part' % tag)
            else:
                continue
        resolved = rt.resolve(nobuiltin=True)
        sobject = unmarshaller.process(node, resolved)
        value = getattr(composite, tag, None)
        if value is None:
            # first occurrence: unbounded parts get a list so later
            # occurrences can be appended
            if rt.unbounded():
                value = []
                setattr(composite, tag, value)
                value.append(sobject)
            else:
                setattr(composite, tag, sobject)
        else:
            # repeated tag: promote the existing scalar to a list
            if not isinstance(value, list):
                value = [value,]
                setattr(composite, tag, value)
            value.append(sobject)
    return composite
def get_fault(self, reply):
    """
    Extract the fault from the specified soap reply.  If I{faults} is True, an
    exception is raised.  Otherwise, the I{unmarshalled} fault L{Object} is
    returned.  This method is called when the server raises a I{web fault}.
    @param reply: A soap reply message.
    @type reply: str
    @return: A fault object.
    @rtype: tuple ( L{Element}, L{Object} )
    @raise WebFault: When the I{faults} option is enabled.
    """
    reply = self.replyfilter(reply)
    sax = Parser()
    faultroot = sax.parse(string=reply)
    soapenv = faultroot.getChild('Envelope')
    soapbody = soapenv.getChild('Body')
    fault = soapbody.getChild('Fault')
    # untyped unmarshalling of the fault content
    unmarshaller = self.unmarshaller(False)
    p = unmarshaller.process(fault)
    if self.options().faults:
        raise WebFault(p, faultroot)
    return (faultroot, p.detail)
def mkparam(self, method, pdef, object):
    """
    Builds a parameter for the specified I{method} using the parameter
    definition (pdef) and the specified value (object).
    @param method: A method name.
    @type method: str
    @param pdef: A parameter definition.
    @type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
    @param object: The parameter value.
    @type object: any
    @return: The parameter fragment.
    @rtype: L{Element}
    """
    marshaller = self.marshaller()
    name, ptype = pdef[0], pdef[1]
    content = Content(
        tag=name,
        value=object,
        type=ptype,
        real=ptype.resolve())
    return marshaller.process(content)
def mkheader(self, method, hdef, object):
    """
    Builds a soapheader for the specified I{method} using the header
    definition (hdef) and the specified value (object).
    @param method: A method name.
    @type method: str
    @param hdef: A header definition.
    @type hdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
    @param object: The header value.
    @type object: any
    @return: The header fragment.
    @rtype: L{Element}
    """
    marshaller = self.marshaller()
    if isinstance(object, (list, tuple)):
        # one header fragment per item
        return [self.mkheader(method, hdef, item) for item in object]
    content = Content(tag=hdef[0], value=object, type=hdef[1])
    return marshaller.process(content)
def envelope(self, header, body):
    """
    Build the B{<Envelope/>} for a soap outbound message.
    @param header: The soap message B{header}.
    @type header: L{Element}
    @param body: The soap message B{body}.
    @type body: L{Element}
    @return: The soap envelope containing the body and header.
    @rtype: L{Element}
    """
    root = Element('Envelope', ns=envns)
    root.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
    root.append(header)
    root.append(body)
    return root
def header(self, content):
    """
    Build the B{<Header/>} for a soap outbound message.
    (The original docstring said <Body/>, a copy-paste error.)
    @param content: The header content.
    @type content: L{Element}
    @return: the soap header fragment.
    @rtype: L{Element}
    """
    header = Element('Header', ns=envns)
    header.append(content)
    return header
def bodycontent(self, method, args, kwargs):
    """
    Get the content for the soap I{body} node.
    @param method: A service method.
    @type method: I{service.Method}
    @param args: method parameter values
    @type args: list
    @param kwargs: Named (keyword) args for the method invoked.
    @type kwargs: dict
    @return: The xml content for the <body/>
    @rtype: [L{Element},..]
    @raise NotImplementedError: Always; concrete bindings must override.
    """
    # Abstract hook.  NotImplementedError replaces the Python 2-only
    # ``raise Exception, 'msg'`` syntax; it is a subclass of Exception,
    # so existing callers that catch Exception are unaffected.
    raise NotImplementedError('not implemented')
def headercontent(self, method):
    """
    Get the content for the soap I{Header} node.
    @param method: A service method.
    @type method: I{service.Method}
    @return: The xml content for the <Header/>
    @rtype: [L{Element},..]
    """
    n = 0
    content = []
    # WS-Security header, when configured, always goes first
    wsse = self.options().wsse
    if wsse is not None:
        content.append(wsse.xml())
    headers = self.options().soapheaders
    if not isinstance(headers, (tuple,list,dict)):
        headers = (headers,)
    if len(headers) == 0:
        return content
    pts = self.headpart_types(method)
    if isinstance(headers, (tuple,list)):
        # positional headers: pair each value with the next header part
        for header in headers:
            if isinstance(header, Element):
                # raw XML supplied by the caller is copied in unchanged
                # and does not consume a header part
                content.append(deepcopy(header))
                continue
            if len(pts) == n: break
            h = self.mkheader(method, pts[n], header)
            ns = pts[n][1].namespace('ns0')
            h.setPrefix(ns[0], ns[1])
            content.append(h)
            n += 1
    else:
        # dict headers: matched to parts by part name; unmatched parts
        # are simply omitted
        for pt in pts:
            header = headers.get(pt[0])
            if header is None:
                continue
            h = self.mkheader(method, pt, header)
            ns = pt[1].namespace('ns0')
            h.setPrefix(ns[0], ns[1])
            content.append(h)
    return content
def replycontent(self, method, body):
    """
    Get the reply body content.
    @param method: A service method.
    @type method: I{service.Method}
    @param body: The soap body
    @type body: L{Element}
    @return: the body content
    @rtype: [L{Element},...]
    @raise NotImplementedError: Always; concrete bindings must override.
    """
    # Abstract hook.  NotImplementedError replaces the Python 2-only
    # ``raise Exception, 'msg'`` syntax; it is a subclass of Exception,
    # so existing callers that catch Exception are unaffected.
    raise NotImplementedError('not implemented')
def body(self, content):
    """
    Build the B{<Body/>} for a soap outbound message.
    @param content: The body content.
    @type content: L{Element}
    @return: the soap body fragment.
    @rtype: L{Element}
    """
    node = Element('Body', ns=envns)
    node.append(content)
    return node
def bodypart_types(self, method, input=True):
    """
    Get a list of I{parameter definitions} (pdef) defined for the specified method.
    Each I{pdef} is a tuple: (I{name}, L{xsd.sxbase.SchemaObject})
    @param method: A service method.
    @type method: I{service.Method}
    @param input: Defines input/output message.
    @type input: boolean
    @return: A list of parameter definitions
    @rtype: [I{pdef},]
    """
    result = []
    if input:
        parts = method.soap.input.body.parts
    else:
        parts = method.soap.output.body.parts
    for p in parts:
        # a part references either a schema element or a schema type
        if p.element is not None:
            query = ElementQuery(p.element)
        else:
            query = TypeQuery(p.type)
        pt = query.execute(self.schema())
        if pt is None:
            raise TypeNotFound(query.ref)
        if p.type is not None:
            # type-referencing parts are wrapped so they behave like
            # elements during (un)marshalling
            pt = PartElement(p.name, pt)
        if input:
            if pt.name is None:
                result.append((p.name, pt))
            else:
                result.append((pt.name, pt))
        else:
            # output parts are returned bare (no name pairing)
            result.append(pt)
    return result
def headpart_types(self, method, input=True):
    """
    Get a list of I{parameter definitions} (pdef) defined for the specified method.
    Each I{pdef} is a tuple: (I{name}, L{xsd.sxbase.SchemaObject})
    Mirrors bodypart_types() but walks the soap I{header} definitions.
    @param method: A service method.
    @type method: I{service.Method}
    @param input: Defines input/output message.
    @type input: boolean
    @return: A list of parameter definitions
    @rtype: [I{pdef},]
    """
    result = []
    if input:
        headers = method.soap.input.headers
    else:
        headers = method.soap.output.headers
    for header in headers:
        part = header.part
        # a part references either a schema element or a schema type
        if part.element is not None:
            query = ElementQuery(part.element)
        else:
            query = TypeQuery(part.type)
        pt = query.execute(self.schema())
        if pt is None:
            raise TypeNotFound(query.ref)
        if part.type is not None:
            # type-referencing parts are wrapped so they behave like
            # elements during (un)marshalling
            pt = PartElement(part.name, pt)
        if input:
            if pt.name is None:
                result.append((part.name, pt))
            else:
                result.append((pt.name, pt))
        else:
            # output parts are returned bare (no name pairing)
            result.append(pt)
    return result
def returned_types(self, method):
    """
    Get the L{xsd.sxbase.SchemaObject} returned by the I{method}.
    @param method: A service method.
    @type method: I{service.Method}
    @return: The types returned by the method.
    @rtype: [I{rtype},..]
    """
    # output body parts are exactly the returned types
    return list(self.bodypart_types(method, input=False))
class PartElement(SchemaElement):
    """
    A part used to represent a message part when the part
    references a schema type and thus assumes to be an element.
    @ivar resolved: The part type.
    @type resolved: L{suds.xsd.sxbase.SchemaObject}
    """

    def __init__(self, name, resolved):
        """
        @param name: The part name.
        @type name: str
        @param resolved: The part type.
        @type resolved: L{suds.xsd.sxbase.SchemaObject}
        """
        # synthesize an <element/> root in the XSD namespace
        root = Element('element', ns=Namespace.xsdns)
        SchemaElement.__init__(self, resolved.schema, root)
        self.__resolved = resolved
        self.name = name
        self.form_qualified = False

    def implany(self):
        # no implicit <any/> content is added for parts
        return self

    def optional(self):
        # message parts are treated as always optional
        return True

    def namespace(self, prefix=None):
        # parts are unqualified: always the default namespace
        return Namespace.default

    def resolve(self, nobuiltin=False):
        # when nobuiltin is requested and the wrapped type is a builtin,
        # resolve to this wrapper instead of the builtin type
        if nobuiltin and self.__resolved.builtin():
            return self
        else:
            return self.__resolved
| lgpl-3.0 |
UNINETT/nav | tools/eventgenerators/snmpevent.py | 2 | 2774 | #!/usr/bin/env python3
#
# Copyright (C) 2007, 2012, 2017 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"Script to simulate snmpAgentState events from ipdevpoll"
from __future__ import print_function
import sys
from nav import db
from nav.event import Event
# Module-level database handle/cursor shared by the whole script.
connection = db.getConnection('default')
database = connection.cursor()
def handler(nblist, state):
    """Post an snmpAgentState event for every netbox id in nblist.

    state 's' posts snmpAgentDown (start event); anything else posts
    snmpAgentUp (end event).
    """
    alerttype = 'snmpAgentDown' if state == 's' else 'snmpAgentUp'
    for netboxid in nblist:
        event = Event('ipdevpoll', 'eventEngine', netboxid=netboxid,
                      eventtypeid='snmpAgentState', state=state, severity=100)
        event['alerttype'] = alerttype
        event.post()
# Parse the command-line match specs, resolve them to netboxes, and post
# snmpAgentState events for each.
if len(sys.argv) <= 2:
    print("Not enough arguments (%d), <match spec> <up|down>" % len(sys.argv))
    sys.exit(0)

nb = []          # netboxids to post events for, in discovery order
nbdup = set()    # fast duplicate test for netboxids
sysnames = []    # sysnames parallel to nb, for the status printout

for qn in sys.argv[1:-1]:
    # Base query; a prefix on the match spec selects the filtered column:
    #   -x,y -> typeid IN (...)    _x,y -> catid IN (...)
    #   %x,y -> roomid IN (...)    x,y  -> sysname IN (...)
    #   plain -> sysname LIKE pattern
    sql = ("SELECT netboxid,sysname,typeid FROM netbox "
           "JOIN room USING(roomid) WHERE ip IS NOT NULL")
    params = []
    if qn.startswith(("_", "-", "%")) or "," in qn:
        if qn.startswith("-"):
            column, qn = "typeid", qn[1:]
        elif qn.startswith("_"):
            column, qn = "catid", qn[1:]
        elif qn.startswith("%"):
            column, qn = "roomid", qn[1:]
        else:
            column = "sysname"
        ids = qn.split(",")
        # Parameterized IN-list: user input is never interpolated into the
        # SQL text (the original concatenated argv straight into the query).
        # ``column`` comes from the fixed whitelist above, so the %-format
        # here is safe.
        sql += " AND %s IN (%s)" % (column, ",".join(["%s"] * len(ids)))
        params.extend(ids)
    else:
        sql += " AND sysname LIKE %s"
        params.append(qn)
    database.execute(sql, params)
    for netboxid, sysname, typeid in database.fetchall():
        if netboxid not in nbdup:
            nb.append(netboxid)
            sysnames.append(sysname)
            nbdup.add(netboxid)

# Map the trailing up/down argument to the event state code.
laststate = sys.argv[-1]
if laststate.startswith("u"):
    state = "e"   # end event == agent up
elif laststate.startswith("d"):
    state = "s"   # start event == agent down
else:
    print("Unknown state: " + laststate)
    sys.exit(0)

updown = "up" if state == "e" else "down"
print("SNMP agents going %s on: %r" % (updown, sysnames))
handler(nb, state)
connection.commit()
| gpl-2.0 |
freedomtan/tensorflow | tensorflow/python/estimator/export/export_output.py | 41 | 1319 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export_output python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.export import export_output
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
# Re-export every public name from the real implementation module.  __all__
# is set on the source module first so the star-import below also picks up
# names with a single leading underscore (which star-import would otherwise
# skip); only dunder names are excluded.
export_output.__all__ = [
    s for s in dir(export_output) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.export.export_output import *
| apache-2.0 |
hatoncat/limbo-android | jni/qemu/roms/seabios/tools/acpi_extract.py | 44 | 10031 | #!/usr/bin/python
# Copyright (C) 2011 Red Hat, Inc., Michael S. Tsirkin <mst@redhat.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
# Process mixed ASL/AML listing (.lst file) produced by iasl -l
# Locate and execute ACPI_EXTRACT directives, output offset info
#
# Documentation of ACPI_EXTRACT_* directive tags:
#
# These directive tags output offset information from AML for BIOS runtime
# table generation.
# Each directive is of the form:
# ACPI_EXTRACT_<TYPE> <array_name> <Operator> (...)
# and causes the extractor to create an array
# named <array_name> with offset, in the generated AML,
# of an object of a given type in the following <Operator>.
#
# A directive must fit on a single code line.
#
# Object type in AML is verified, a mismatch causes a build failure.
#
# Directives and operators currently supported are:
# ACPI_EXTRACT_NAME_DWORD_CONST - extract a Dword Const object from Name()
# ACPI_EXTRACT_NAME_WORD_CONST - extract a Word Const object from Name()
# ACPI_EXTRACT_NAME_BYTE_CONST - extract a Byte Const object from Name()
# ACPI_EXTRACT_METHOD_STRING - extract a NameString from Method()
# ACPI_EXTRACT_NAME_STRING - extract a NameString from Name()
# ACPI_EXTRACT_PROCESSOR_START - start of Processor() block
# ACPI_EXTRACT_PROCESSOR_STRING - extract a NameString from Processor()
# ACPI_EXTRACT_PROCESSOR_END - offset at last byte of Processor() + 1
#
# ACPI_EXTRACT_ALL_CODE - create an array storing the generated AML bytecode
#
# ACPI_EXTRACT is not allowed anywhere else in code, except in comments.
import re;
import sys;
import fileinput;
aml = []      # decoded AML bytecode, one int per byte
asl = []      # asl_line records for the ASL side of the listing
output = {}   # array name -> list of AML offsets (or the whole bytecode)
debug = ""    # human-readable context printed with error messages
class asl_line:
    # One parsed line of the ASL listing (class attrs act as defaults).
    line = None        # the ASL source text
    lineno = None      # 1-based input line number
    aml_offset = None  # offset into aml[] at the time this line was seen
def die(message):
    """Print an error with the current parser context to stderr and abort."""
    context = "Error: %s; %s\n" % (message, debug)
    sys.stderr.write(context)
    sys.exit(1)
# Store an ASL command, matching AML offset, and input line (for debugging)
def add_asl(lineno, line):
    l = asl_line()
    l.line = line
    l.lineno = lineno
    # AML emitted so far == the offset the next command will start at
    l.aml_offset = len(aml)
    asl.append(l)
# Store an AML byte sequence
# Verify that offset output by iasl matches # of bytes so far
def add_aml(offset, line):
    o = int(offset, 16);
    # Sanity check: offset must match size of code so far
    if (o != len(aml)):
        die("Offset 0x%x != 0x%x" % (o, len(aml)))
    # Strip any trailing dots and ASCII dump after "
    line = re.sub(r'\s*\.*\s*".*$', "", line)
    # Strip trailing whitespace
    line = re.sub(r'\s+$', "", line)
    # Strip leading whitespace
    line = re.sub(r'^\s+', "", line)
    # Split on whitespace
    code = re.split(r'\s+', line)
    for c in code:
        # Require a legal hex number, exactly two digits
        if (not(re.search(r'^[0-9A-Fa-f][0-9A-Fa-f]$', c))):
            die("Unexpected octet %s" % c);
        aml.append(int(c, 16));
# Process aml bytecode array, decoding AML
def aml_pkglen_bytes(offset):
    # Total number of PkgLength bytes (1-4): the top two bits (bits 7-6)
    # of the lead byte give the count of *extra* bytes that follow it.
    pkglenbytes = aml[offset] >> 6;
    return pkglenbytes + 1
def aml_pkglen(offset):
    """Decode the AML PkgLength field at *offset* and return the package
    length (which counts from the first PkgLength byte)."""
    pkgstart = offset
    pkglenbytes = aml_pkglen_bytes(offset)
    pkglen = aml[offset] & 0x3F
    # Bugfix: a single-byte PkgLength legitimately encodes values 0-63 in
    # bits 0-5 (ACPI spec, PkgLength encoding); only the multibyte form
    # restricts the lead byte to a nibble (bits 0-3).  The original test
    # was ``pkglenbytes > 0``, which is always true (aml_pkglen_bytes
    # returns at least 1) and so rejected valid one-byte lengths
    # 0x10-0x3F.
    if ((pkglenbytes > 1) and (pkglen & 0x30)):
        die("PkgLen bytes 0x%x but first nibble 0x%x expected 0x0X" %
            (pkglen, pkglen))
    offset += 1
    pkglenbytes -= 1
    # Extra bytes contribute 8 bits each, starting above the lead nibble.
    for i in range(pkglenbytes):
        pkglen |= aml[offset + i] << (i * 8 + 4)
    if (len(aml) < pkgstart + pkglen):
        die("PckgLen 0x%x at offset 0x%x exceeds AML size 0x%x" %
            (pkglen, offset, len(aml)))
    return pkglen
# Given method offset, find its NameString offset
def aml_method_string(offset):
    # 0x14 MethodOp PkgLength NameString MethodFlags TermList
    if (aml[offset] != 0x14):
        die("Method offset 0x%x: expected 0x14 actual 0x%x" %
            (offset, aml[offset]));
    offset += 1;
    # skip over the (possibly multibyte) PkgLength field
    pkglenbytes = aml_pkglen_bytes(offset)
    offset += pkglenbytes;
    return offset;

# Given name offset, find its NameString offset
def aml_name_string(offset):
    # 0x08 NameOp NameString DataRef
    if (aml[offset] != 0x08):
        die("Name offset 0x%x: expected 0x08 actual 0x%x" %
            (offset, aml[offset]));
    return offset + 1;
# Given data offset, find dword const offset
def aml_data_dword_const(offset):
    # 0x0C DWordPrefix DWordData
    # (the original comment described NameOp; these helpers check the
    # integer-constant prefix bytes, not NameOp)
    if (aml[offset] != 0x0C):
        die("Name offset 0x%x: expected 0x0C actual 0x%x" %
            (offset, aml[offset]));
    return offset + 1;

# Given data offset, find word const offset
def aml_data_word_const(offset):
    # 0x0B WordPrefix WordData
    if (aml[offset] != 0x0B):
        die("Name offset 0x%x: expected 0x0B actual 0x%x" %
            (offset, aml[offset]));
    return offset + 1;

# Given data offset, find byte const offset
def aml_data_byte_const(offset):
    # 0x0A BytePrefix ByteData
    if (aml[offset] != 0x0A):
        die("Name offset 0x%x: expected 0x0A actual 0x%x" %
            (offset, aml[offset]));
    return offset + 1;
# Given name offset, find dword const offset
def aml_name_dword_const(offset):
    # skip NameOp plus the 4-char NameSeg, then expect the dword prefix
    return aml_data_dword_const(aml_name_string(offset) + 4)

# Given name offset, find word const offset
def aml_name_word_const(offset):
    return aml_data_word_const(aml_name_string(offset) + 4)

# Given name offset, find byte const offset
def aml_name_byte_const(offset):
    return aml_data_byte_const(aml_name_string(offset) + 4)
def aml_processor_start(offset):
    # 0x5B 0x83 ProcessorOp PkgLength NameString ProcID
    # validates the two-byte extended opcode; dies on mismatch
    if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x83)):
        die("Name offset 0x%x: expected 0x5B 0x83 actual 0x%x 0x%x" %
            (offset, aml[offset], aml[offset + 1]));
    return offset
def aml_processor_string(offset):
    """Return the offset of the NameString inside a Processor() block."""
    # 0x5B 0x83 ProcessorOp PkgLength NameString ProcID
    aml_processor_start(offset)  # validates the opcode (dies on mismatch)
    offset += 2
    # skip the (possibly multibyte) PkgLength field
    offset += aml_pkglen_bytes(offset)
    return offset

def aml_processor_end(offset):
    """Return the offset one past the last byte of a Processor() block."""
    aml_processor_start(offset)  # validates the opcode (dies on mismatch)
    offset += 2
    # PkgLength counts from its own first byte, so this lands just past
    # the block.  (The original also computed aml_pkglen_bytes() and kept
    # the start offset in unused locals; both removed.)
    return offset + aml_pkglen(offset)
lineno = 0
for line in fileinput.input():
    # Strip trailing newline
    line = line.rstrip();
    # line number and debug string to output in case of errors
    lineno = lineno + 1
    debug = "input line %d: %s" % (lineno, line)
    # ASL listing line: whitespace, decimal line number, "....", then code
    pasl = re.compile('^\s+([0-9]+)\.\.\.\.\s*')
    m = pasl.search(line)
    if (m):
        add_asl(lineno, pasl.sub("", line));
    # AML listing line: hex offset, "....", then the code bytes
    paml = re.compile('^([0-9A-Fa-f]+)\.\.\.\.\s*')
    m = paml.search(line)
    if (m):
        add_aml(m.group(1), paml.sub("", line))
# Now go over code: find each ACPI_EXTRACT directive and record the offset
# of the object it refers to in the output table.
# Track AML offset of a previous non-empty ASL command
prev_aml_offset = -1
for i in range(len(asl)):
    debug = "input line %d: %s" % (asl[i].lineno, asl[i].line)

    l = asl[i].line

    # skip if not an extract directive
    a = len(re.findall(r'ACPI_EXTRACT', l))
    if (not a):
        # If not empty, store AML offset. Will be used for sanity checks
        # IASL seems to put {}. at random places in the listing.
        # Ignore any non-words for the purpose of this test.
        m = re.search(r'\w+', l)
        if (m):
            prev_aml_offset = asl[i].aml_offset
        continue

    if (a > 1):
        die("Expected at most one ACPI_EXTRACT per line, actual %d" % a)

    mext = re.search(r'''
                     ^\s* # leading whitespace
                     /\*\s* # start C comment
                     (ACPI_EXTRACT_\w+) # directive: group(1)
                     \s+ # whitespace separates directive from array name
                     (\w+) # array name: group(2)
                     \s*\*/ # end of C comment
                     \s*$ # trailing whitespace
                     ''', l, re.VERBOSE)
    if (not mext):
        die("Stray ACPI_EXTRACT in input")

    # previous command must have produced some AML,
    # otherwise we are in a middle of a block
    if (prev_aml_offset == asl[i].aml_offset):
        die("ACPI_EXTRACT directive in the middle of a block")

    directive = mext.group(1)
    array = mext.group(2)
    offset = asl[i].aml_offset

    if (directive == "ACPI_EXTRACT_ALL_CODE"):
        if array in output:
            die("%s directive used more than once" % directive)
        output[array] = aml
        continue
    # dispatch to the matching offset-decoding helper; each helper also
    # validates the expected AML opcode and dies on a mismatch
    if (directive == "ACPI_EXTRACT_NAME_DWORD_CONST"):
        offset = aml_name_dword_const(offset)
    elif (directive == "ACPI_EXTRACT_NAME_WORD_CONST"):
        offset = aml_name_word_const(offset)
    elif (directive == "ACPI_EXTRACT_NAME_BYTE_CONST"):
        offset = aml_name_byte_const(offset)
    elif (directive == "ACPI_EXTRACT_NAME_STRING"):
        offset = aml_name_string(offset)
    elif (directive == "ACPI_EXTRACT_METHOD_STRING"):
        offset = aml_method_string(offset)
    elif (directive == "ACPI_EXTRACT_PROCESSOR_START"):
        offset = aml_processor_start(offset)
    elif (directive == "ACPI_EXTRACT_PROCESSOR_STRING"):
        offset = aml_processor_string(offset)
    elif (directive == "ACPI_EXTRACT_PROCESSOR_END"):
        offset = aml_processor_end(offset)
    else:
        die("Unsupported directive %s" % directive)

    if array not in output:
        output[array] = []
    output[array].append(offset)

debug = "at end of file"
def get_value_type(maxvalue):
    """Return the narrowest C type name able to hold *maxvalue*."""
    if maxvalue >= 0x10000:
        return "int"
    if maxvalue >= 0x100:
        return "short"
    return "char"
# Pretty print output: one C array per extracted table, typed as narrowly
# as its largest offset allows.
for array, offsets in output.items():
    ctype = get_value_type(max(offsets))
    body = ",\n".join("0x%x" % value for value in offsets)
    sys.stdout.write("static unsigned %s %s[] = {\n" % (ctype, array))
    sys.stdout.write(body)
    sys.stdout.write('\n};\n')
| gpl-2.0 |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/unittest_util/test_page_test_results.py | 53 | 1158 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.results import page_test_results
from telemetry.value import scalar
class TestPageTestResults(page_test_results.PageTestResults):
    """PageTestResults pre-populated with a single page, for unit tests."""

    def __init__(self, test):
        super(TestPageTestResults, self).__init__()
        self.test = test
        page = page_module.Page("http://www.google.com", {})
        self.WillRunPage(page)

    def GetPageSpecificValueNamed(self, name):
        """Return the unique page-specific value named *name*."""
        values = [value for value in self.all_page_specific_values
                  if value.name == name]
        assert len(values) == 1, 'Could not find value named %s' % name
        return values[0]

    def AssertHasPageSpecificScalarValue(self, name, units, expected_value):
        """Assert a ScalarValue with the given name, units and value exists."""
        value = self.GetPageSpecificValueNamed(name)
        # assertEqual: the assertEquals alias is deprecated in unittest.
        self.test.assertEqual(units, value.units)
        self.test.assertTrue(isinstance(value, scalar.ScalarValue))
        self.test.assertEqual(expected_value, value.value)

    def __str__(self):
        return '\n'.join([repr(x) for x in self.all_page_specific_values])
| bsd-3-clause |
pyq881120/amoco | amoco/arch/x64/formats.py | 5 | 1941 | # -*- coding: utf-8 -*-
from amoco.arch.core import Formatter
def pfx(i):
    """Return the group-0 instruction prefix followed by a space, or ''."""
    prefixes = i.misc['pfx']
    if prefixes is None:
        return ''
    grp0 = prefixes[0]
    return '%s ' % grp0 if grp0 is not None else ''
def mnemo(i):
    """Lower-cased mnemonic ('cc' removed, condition suffixed), 12 wide."""
    name = i.mnemonic.replace('cc', '')
    if hasattr(i, 'cond'):
        # the first alias before '/' is the display suffix
        name += i.cond[0].split('/')[0]
    return name.lower().ljust(12)
def deref(op):
    """Format a memory operand as Intel-style '<size> ptr <seg>:[base+disp]'.

    Non-memory operands fall back to str(op).
    """
    if not op._is_mem:
        return str(op)
    d = '%+d' % op.a.disp if op.a.disp else ''
    s = {8: 'byte ptr ', 16: 'word ptr ', 32: 'dword ptr ',
         128: 'xmmword ptr '}.get(op.size, '')
    # Bugfix: the original used ``op.a.seg is not ''`` — an identity test
    # against a string literal, which is implementation-dependent; use a
    # value comparison instead.
    if op.a.seg != '':
        s += '%s:' % op.a.seg
    s += '[%s%s]' % (op.a.base, d)
    return s
def opers(i):
    """Render the operand list of instruction *i*, joined with ', '."""
    rendered = []
    for op in i.operands:
        if op._is_mem:
            rendered.append(deref(op))
        elif op._is_cst and i.misc['imm_ref'] is not None:
            # immediate resolved to a symbolic reference
            rendered.append(str(i.misc['imm_ref']))
        elif op._is_cst and op.sf:
            # signed immediate: always show the sign
            rendered.append('%+d' % op.value)
        else:
            rendered.append(str(op))
    return ', '.join(rendered)
def oprel(i):
    """Render a relative branch target: '*addr' when known, else '.+disp'."""
    target = i.misc['to']
    if target is not None:
        return '*' + str(target)
    if i.address is not None and i.operands[0]._is_cst:
        # compute and cache the absolute destination
        dest = i.address + i.operands[0].signextend(64) + i.length
        i.misc['to'] = dest
        return '*' + str(dest)
    return '.%+d' % i.operands[0].value
# main intel formats: tuples of renderer callables applied in order
format_intel_default = (mnemo,opers)
format_intel_ptr = (mnemo,opers)
format_intel_str = (pfx,mnemo,opers)   # string ops may carry a rep prefix
format_intel_rel = (mnemo,oprel)       # relative branches

# formats: instruction spec name -> renderer tuple
IA32e_Intel_formats = {
    'ia32_strings' : format_intel_str,
    'ia32_mov_adr' : format_intel_ptr,
    'ia32_ptr_ib' : format_intel_ptr,
    'ia32_ptr_iwd' : format_intel_ptr,
    'ia32_rm8' : format_intel_ptr,
    'ia32_rm32' : format_intel_ptr,
    'ia32_imm_rel' : format_intel_rel,
}

IA32e_Intel = Formatter(IA32e_Intel_formats)
IA32e_Intel.default = format_intel_default
| gpl-2.0 |
geminateCoder/Character-Archive-Website | Lib/site-packages/sqlalchemy/orm/query.py | 20 | 147616 | # orm/query.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative, InspectionAttr
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
    def __init__(self, entities, session=None):
        """Construct a :class:`.Query` against the given entities,
        optionally bound to a :class:`.Session`."""
        self.session = session
        self._polymorphic_adapters = {}
        self._set_entities(entities)

    def _set_entities(self, entities, entity_wrapper=None):
        # Wrap each requested entity; each wrapper constructor is relied
        # on for its side effects against this query (self._entities is
        # reset here and populated by the wrappers).
        if entity_wrapper is None:
            entity_wrapper = _QueryEntity
        self._entities = []
        self._primary_entity = None
        for ent in util.to_list(entities):
            entity_wrapper(self, ent)

        self._set_entity_selectables(self._entities)

    def _set_entity_selectables(self, entities):
        # Build/extend the entity -> (inspection info, aliased adapter)
        # map and hand each entity its setup information.
        self._mapper_adapter_map = d = self._mapper_adapter_map.copy()

        for ent in entities:
            for entity in ent.entities:
                if entity not in d:
                    ext_info = inspect(entity)
                    if not ext_info.is_aliased_class and \
                            ext_info.mapper.with_polymorphic:
                        # plain mapper using with_polymorphic: install a
                        # polymorphic adapter once per mapped table
                        if ext_info.mapper.mapped_table not in \
                                self._polymorphic_adapters:
                            self._mapper_loads_polymorphically_with(
                                ext_info.mapper,
                                sql_util.ColumnAdapter(
                                    ext_info.selectable,
                                    ext_info.mapper._equivalent_columns
                                )
                            )
                        aliased_adapter = None
                    elif ext_info.is_aliased_class:
                        aliased_adapter = ext_info._adapter
                    else:
                        aliased_adapter = None

                    d[entity] = (
                        ext_info,
                        aliased_adapter
                    )
                ent.setup_entity(*d[entity])

    def _mapper_loads_polymorphically_with(self, mapper, adapter):
        # Register *adapter* for the mapper, its polymorphic sub-mappers,
        # and each of their local tables up to the root.
        for m2 in mapper._with_polymorphic_mappers or [mapper]:
            self._polymorphic_adapters[m2] = adapter
            for m in m2.iterate_to_root():
                self._polymorphic_adapters[m.local_table] = adapter
    def _set_select_from(self, obj, set_base_alias):
        # Establish the FROM list.  When set_base_alias is True, the single
        # FROM must be a plain selectable, which then also becomes the base
        # column adapter for the whole query.
        fa = []
        select_from_alias = None

        for from_obj in obj:
            info = inspect(from_obj)

            if hasattr(info, 'mapper') and \
                    (info.is_mapper or info.is_aliased_class):
                self._select_from_entity = from_obj
                if set_base_alias:
                    raise sa_exc.ArgumentError(
                        "A selectable (FromClause) instance is "
                        "expected when the base alias is being set.")
                fa.append(info.selectable)
            elif not info.is_selectable:
                raise sa_exc.ArgumentError(
                    "argument is not a mapped class, mapper, "
                    "aliased(), or FromClause instance.")
            else:
                if isinstance(from_obj, expression.SelectBase):
                    # a raw SELECT cannot appear in a FROM list; alias it
                    from_obj = from_obj.alias()
                if set_base_alias:
                    select_from_alias = from_obj
                fa.append(from_obj)

        self._from_obj = tuple(fa)

        if set_base_alias and \
                len(self._from_obj) == 1 and \
                isinstance(select_from_alias, expression.Alias):
            equivs = self.__all_equivs()
            self._from_obj_alias = sql_util.ColumnAdapter(
                self._from_obj[0], equivs)

    def _reset_polymorphic_adapter(self, mapper):
        # Remove adapters previously installed by
        # _mapper_loads_polymorphically_with() for this mapper.
        for m2 in mapper._with_polymorphic_mappers:
            self._polymorphic_adapters.pop(m2, None)
            for m in m2.iterate_to_root():
                self._polymorphic_adapters.pop(m.local_table, None)

    def _adapt_polymorphic_element(self, element):
        # Adapt one expression element against any registered polymorphic
        # adapter; returns None when no adapter applies.
        if "parententity" in element._annotations:
            search = element._annotations['parententity']
            alias = self._polymorphic_adapters.get(search, None)
            if alias:
                return alias.adapt_clause(element)

        if isinstance(element, expression.FromClause):
            search = element
        elif hasattr(element, 'table'):
            search = element.table
        else:
            return None

        alias = self._polymorphic_adapters.get(search, None)
        if alias:
            return alias.adapt_clause(element)

    def _adapt_col_list(self, cols):
        # Adapt a list of column expressions as filter criteria, ORM-only.
        return [
            self._adapt_clause(
                expression._literal_as_label_reference(o),
                True, True)
            for o in cols
        ]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
    def _adapt_clause(self, clause, as_filter, orm_only):
        """Adapt incoming clauses to transformations which
        have been applied within this query."""
        # collect (orm_only_flag, adapter_callable) pairs; order matters:
        # filter aliases, then the from-obj alias, then polymorphic adapters
        adapters = []
        # do we adapt all expression elements or only those
        # tagged as 'ORM' constructs ?
        if not self._orm_only_adapt:
            orm_only = False
        if as_filter and self._filter_aliases:
            for fa in self._filter_aliases._visitor_iterator:
                adapters.append(
                    (
                        orm_only, fa.replace
                    )
                )
        if self._from_obj_alias:
            # for the "from obj" alias, apply extra rule to the
            # 'ORM only' check, if this query were generated from a
            # subquery of itself, i.e. _from_selectable(), apply adaption
            # to all SQL constructs.
            adapters.append(
                (
                    orm_only if self._orm_only_from_obj_alias else False,
                    self._from_obj_alias.replace
                )
            )
        if self._polymorphic_adapters:
            adapters.append(
                (
                    orm_only, self._adapt_polymorphic_element
                )
            )
        if not adapters:
            return clause
        def replace(elem):
            # first adapter that produces a non-None result wins
            for _orm_only, adapter in adapters:
                # if 'orm only', look for ORM annotations
                # in the element before adapting.
                if not _orm_only or \
                        '_orm_adapt' in elem._annotations or \
                        "parententity" in elem._annotations:
                    e = adapter(elem)
                    if e is not None:
                        return e
        return visitors.replacement_traverse(
            clause,
            {},
            replace
        )
    def _entity_zero(self):
        # the first (primary) query entity
        return self._entities[0]
def _mapper_zero(self):
# TODO: self._select_from_entity is not a mapper
# so this method is misnamed
return self._select_from_entity \
if self._select_from_entity is not None \
else self._entity_zero().entity_zero
    @property
    def _mapper_entities(self):
        # generator over only the _MapperEntity members of _entities
        # (column entities are skipped)
        for ent in self._entities:
            if isinstance(ent, _MapperEntity):
                yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _bind_mapper(self):
ezero = self._mapper_zero()
if ezero is not None:
insp = inspect(ezero)
if not insp.is_clause_element:
return insp.mapper
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
    def _get_condition(self):
        # assert no criterion is present for a get(), then reset
        # criterion-related state (via _no_criterion_condition)
        return self._no_criterion_condition(
            "get", order_by=False, distinct=False)
    def _get_existing_condition(self):
        # assert-only variant of _get_condition(): raises if criterion
        # exists but does not reset any state
        self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
    def _no_criterion_condition(self, meth, order_by=True, distinct=True):
        # assert no criterion, then reset criterion-related state
        # back to its defaults
        self._no_criterion_assertion(meth, order_by, distinct)
        self._from_obj = ()
        self._statement = self._criterion = None
        self._order_by = self._group_by = self._distinct = False
    def _no_clauseelement_condition(self, meth):
        # NOTE: when assertions are disabled, this returns early and
        # _no_criterion_condition() is intentionally not invoked
        if not self._enable_assertions:
            return
        if self._order_by:
            raise sa_exc.InvalidRequestError(
                "Query.%s() being called on a "
                "Query with existing criterion. " % meth)
        self._no_criterion_condition(meth)
    def _no_statement_condition(self, meth):
        # raise if a full statement was set via from_statement();
        # additional criterion cannot be applied in that case
        if not self._enable_assertions:
            return
        if self._statement is not None:
            raise sa_exc.InvalidRequestError(
                ("Query.%s() being called on a Query with an existing full "
                 "statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
    def _get_options(self, populate_existing=None,
                     version_check=None,
                     only_load_props=None,
                     refresh_state=None):
        # apply loader flags in place (non-generative) and return self;
        # falsy arguments leave the corresponding attribute untouched
        if populate_existing:
            self._populate_existing = populate_existing
        if version_check:
            self._version_check = version_check
        if refresh_state:
            self._refresh_state = refresh_state
        if only_load_props:
            self._only_load_props = set(only_load_props)
        return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
    @property
    def statement(self):
        """The full SELECT statement represented by this Query.

        The statement by default will not have disambiguating labels
        applied to the construct unless with_labels(True) is called
        first.

        """
        stmt = self._compile_context(labels=self._with_labels).\
            statement
        if self._params:
            stmt = stmt.params(self._params)
        # TODO: there's no tests covering effects of
        # the annotation not being there
        return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
    def cte(self, name=None, recursive=False):
        """Return the full SELECT statement represented by this
        :class:`.Query` represented as a common table expression (CTE).

        .. versionadded:: 0.7.6

        Parameters and usage are the same as those of the
        :meth:`.SelectBase.cte` method; see that method for
        further details.

        Here is the `Postgresql WITH
        RECURSIVE example
        <http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
        Note that, in this example, the ``included_parts`` cte and the
        ``incl_alias`` alias of it are Core selectables, which
        means the columns are accessed via the ``.c.`` attribute. The
        ``parts_alias`` object is an :func:`.orm.aliased` instance of the
        ``Part`` entity, so column-mapped attributes are available
        directly::

            from sqlalchemy.orm import aliased

            class Part(Base):
                __tablename__ = 'part'
                part = Column(String, primary_key=True)
                sub_part = Column(String, primary_key=True)
                quantity = Column(Integer)

            included_parts = session.query(
                Part.sub_part,
                Part.part,
                Part.quantity).\\
                filter(Part.part=="our part").\\
                cte(name="included_parts", recursive=True)

            incl_alias = aliased(included_parts, name="pr")
            parts_alias = aliased(Part, name="p")
            included_parts = included_parts.union_all(
                session.query(
                    parts_alias.sub_part,
                    parts_alias.part,
                    parts_alias.quantity).\\
                filter(parts_alias.part==incl_alias.c.sub_part)
            )

            q = session.query(
                included_parts.c.sub_part,
                func.sum(included_parts.c.quantity).
                label('total_quantity')
            ).\\
                group_by(included_parts.c.sub_part)

        .. seealso::

            :meth:`.SelectBase.cte`

        """
        # eager loads are disabled before the statement is converted;
        # the actual CTE construction is delegated to Core
        return self.enable_eagerloads(False).\
            statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
    @property
    def selectable(self):
        """Return the :class:`.Select` object emitted by this :class:`.Query`.

        Used for :func:`.inspect` compatibility, this is equivalent to::

            query.enable_eagerloads(False).with_labels().statement

        """
        return self.__clause_element__()
    def __clause_element__(self):
        # Core interface hook: lets a Query be used directly where a
        # clause element is expected
        return self.enable_eagerloads(False).with_labels().statement
    @_generative()
    def enable_eagerloads(self, value):
        """Control whether or not eager joins and subqueries are
        rendered.

        When set to False, the returned Query will not render
        eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
        :func:`~sqlalchemy.orm.subqueryload` options
        or mapper-level ``lazy='joined'``/``lazy='subquery'``
        configurations.

        This is used primarily when nesting the Query's
        statement into a subquery or other
        selectable, or when using :meth:`.Query.yield_per`.

        """
        # flag is consumed at statement compile time
        self._enable_eagerloads = value
    def _no_yield_per(self, message):
        # helper invoked by loader strategies that cannot operate
        # under yield_per(); always raises
        raise sa_exc.InvalidRequestError(
            "The yield_per Query option is currently not "
            "compatible with %s eager loading.  Please "
            "specify lazyload('*') or query.enable_eagerloads(False) in "
            "order to "
            "proceed with query.yield_per()." % message)
    @_generative()
    def with_labels(self):
        """Apply column labels to the return value of Query.statement.

        Indicates that this Query's `statement` accessor should return
        a SELECT statement that applies labels to all columns in the
        form <tablename>_<columnname>; this is commonly used to
        disambiguate columns from multiple tables which have the same
        name.

        When the `Query` actually issues SQL to load rows, it always
        uses column labeling.

        .. note:: The :meth:`.Query.with_labels` method *only* applies
           the output of :attr:`.Query.statement`, and *not* to any of
           the result-row invoking systems of :class:`.Query` itself, e.g.
           :meth:`.Query.first`, :meth:`.Query.all`, etc.  To execute
           a query using :meth:`.Query.with_labels`, invoke the
           :attr:`.Query.statement` using :meth:`.Session.execute`::

                result = session.execute(query.with_labels().statement)

        """
        # consumed by the `statement` accessor
        self._with_labels = True
    @_generative()
    def enable_assertions(self, value):
        """Control whether assertions are generated.

        When set to False, the returned Query will
        not assert its state before certain operations,
        including that LIMIT/OFFSET has not been applied
        when filter() is called, no criterion exists
        when get() is called, and no "from_statement()"
        exists when filter()/order_by()/group_by() etc.
        is called.  This more permissive mode is used by
        custom Query subclasses to specify criterion or
        other modifiers outside of the usual usage patterns.

        Care should be taken to ensure that the usage
        pattern is even possible.  A statement applied
        by from_statement() will override any criterion
        set by filter() or order_by(), for example.

        """
        # consumed by the various _no_*_condition/_assertion helpers
        self._enable_assertions = value
    @property
    def whereclause(self):
        """A readonly attribute which returns the current WHERE criterion for
        this Query.

        This returned value is a SQL expression construct, or ``None`` if no
        criterion has been established.

        """
        return self._criterion
    @_generative()
    def _with_current_path(self, path):
        """indicate that this query applies to objects loaded
        within a certain path.

        Used by deferred loaders (see strategies.py) which transfer
        query options from an originating query to a newly generated
        query intended for the deferred load.

        """
        self._current_path = path
    @_generative(_no_clauseelement_condition)
    def with_polymorphic(self,
                         cls_or_mappers,
                         selectable=None,
                         polymorphic_on=None):
        """Load columns for inheriting classes.

        :meth:`.Query.with_polymorphic` applies transformations
        to the "main" mapped class represented by this :class:`.Query`.
        The "main" mapped class here means the :class:`.Query`
        object's first argument is a full class, i.e.
        ``session.query(SomeClass)``. These transformations allow additional
        tables to be present in the FROM clause so that columns for a
        joined-inheritance subclass are available in the query, both for the
        purposes of load-time efficiency as well as the ability to use
        these columns at query time.

        See the documentation section :ref:`with_polymorphic` for
        details on how this method is used.

        .. versionchanged:: 0.8
            A new and more flexible function
            :func:`.orm.with_polymorphic` supersedes
            :meth:`.Query.with_polymorphic`, as it can apply the equivalent
            functionality to any set of columns or classes in the
            :class:`.Query`, not just the "zero mapper".  See that
            function for a description of arguments.

        """
        if not self._primary_entity:
            raise sa_exc.InvalidRequestError(
                "No primary mapper set up for this Query.")
        # clone the primary entity so the polymorphic settings do not
        # mutate the parent Query's entity
        entity = self._entities[0]._clone()
        self._entities = [entity] + self._entities[1:]
        entity.set_with_polymorphic(self,
                                    cls_or_mappers,
                                    selectable=selectable,
                                    polymorphic_on=polymorphic_on)
    @_generative()
    def yield_per(self, count):
        """Yield only ``count`` rows at a time.

        The purpose of this method is when fetching very large result sets
        (> 10K rows), to batch results in sub-collections and yield them
        out partially, so that the Python interpreter doesn't need to declare
        very large areas of memory which is both time consuming and leads
        to excessive memory use.   The performance from fetching hundreds of
        thousands of rows can often double when a suitable yield-per setting
        (e.g. approximately 1000) is used, even with DBAPIs that buffer
        rows (which are most).

        The :meth:`.Query.yield_per` method **is not compatible with most
        eager loading schemes, including subqueryload and joinedload with
        collections**.  For this reason, it may be helpful to disable
        eager loads, either unconditionally with
        :meth:`.Query.enable_eagerloads`::

            q = sess.query(Object).yield_per(100).enable_eagerloads(False)

        Or more selectively using :func:`.lazyload`; such as with
        an asterisk to specify the default loader scheme::

            q = sess.query(Object).yield_per(100).\\
                options(lazyload('*'), joinedload(Object.some_related))

        .. warning::

            Use this method with caution; if the same instance is
            present in more than one batch of rows, end-user changes
            to attributes will be overwritten.

            In particular, it's usually impossible to use this setting
            with eagerly loaded collections (i.e. any lazy='joined' or
            'subquery') since those collections will be cleared for a
            new load when encountered in a subsequent result batch.
            In the case of 'subquery' loading, the full result for all
            rows is fetched which generally defeats the purpose of
            :meth:`~sqlalchemy.orm.query.Query.yield_per`.

            Also note that while
            :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
            ``stream_results`` execution option to True, currently
            this is only understood by
            :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
            which will stream results using server side cursors
            instead of pre-buffer all rows for this query. Other
            DBAPIs **pre-buffer all rows** before making them
            available.  The memory use of raw database rows is much less
            than that of an ORM-mapped object, but should still be taken into
            consideration when benchmarking.

        .. seealso::

            :meth:`.Query.enable_eagerloads`

        """
        self._yield_per = count
        # also request streaming results and bound buffering from the
        # DBAPI layer, where supported
        self._execution_options = self._execution_options.union(
            {"stream_results": True,
             "max_row_buffer": count})
    def get(self, ident):
        """Return an instance based on the given primary key identifier,
        or ``None`` if not found.

        E.g.::

            my_user = session.query(User).get(5)

            some_object = session.query(VersionedFoo).get((5, 10))

        :meth:`~.Query.get` is special in that it provides direct
        access to the identity map of the owning :class:`.Session`.
        If the given primary key identifier is present
        in the local identity map, the object is returned
        directly from this collection and no SQL is emitted,
        unless the object has been marked fully expired.
        If not present,
        a SELECT is performed in order to locate the object.

        :meth:`~.Query.get` also will perform a check if
        the object is present in the identity map and
        marked as expired - a SELECT
        is emitted to refresh the object as well as to
        ensure that the row is still present.
        If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.

        :meth:`~.Query.get` is only used to return a single
        mapped instance, not multiple instances or
        individual column constructs, and strictly
        on a single primary key value.  The originating
        :class:`.Query` must be constructed in this way,
        i.e. against a single mapped entity,
        with no additional filtering criterion.  Loading
        options via :meth:`~.Query.options` may be applied
        however, and will be used if the object is not
        yet locally present.

        A lazy-loading, many-to-one attribute configured
        by :func:`.relationship`, using a simple
        foreign-key-to-primary-key criterion, will also use an
        operation equivalent to :meth:`~.Query.get` in order to retrieve
        the target value from the local identity map
        before querying the database.  See :doc:`/orm/loading_relationships`
        for further details on relationship loading.

        :param ident: A scalar or tuple value representing
         the primary key.  For a composite primary key,
         the order of identifiers corresponds in most cases
         to that of the mapped :class:`.Table` object's
         primary key columns.  For a :func:`.mapper` that
         was given the ``primary key`` argument during
         construction, the order of identifiers corresponds
         to the elements present in this collection.

        :return: The object instance, or ``None``.

        """
        # identity-map lookup plus SELECT fallback live in _get_impl()
        return self._get_impl(ident, loading.load_on_ident)
    def _get_impl(self, ident, fallback_fn):
        """Implementation for get(): consult the identity map first,
        falling back to *fallback_fn* (a loader callable receiving
        ``(query, identity_key)``) when a database round trip is needed."""
        # convert composite types to individual args
        if hasattr(ident, '__composite_values__'):
            ident = ident.__composite_values__()
        ident = util.to_list(ident)
        mapper = self._only_full_mapper_zero("get")
        if len(ident) != len(mapper.primary_key):
            raise sa_exc.InvalidRequestError(
                "Incorrect number of values in identifier to formulate "
                "primary key for query.get(); primary key columns are %s" %
                ','.join("'%s'" % c for c in mapper.primary_key))
        key = mapper.identity_key_from_primary_key(ident)
        # identity-map shortcut is skipped for populate_existing,
        # always_refresh mappers, and FOR UPDATE queries
        if not self._populate_existing and \
                not mapper.always_refresh and \
                self._for_update_arg is None:
            instance = loading.get_from_identity(
                self.session, key, attributes.PASSIVE_OFF)
            if instance is not None:
                self._get_existing_condition()
                # reject calls for id in identity map but class
                # mismatch.
                if not issubclass(instance.__class__, mapper.class_):
                    return None
                return instance
        return fallback_fn(self, key)
    @_generative()
    def correlate(self, *args):
        """Return a :class:`.Query` construct which will correlate the given
        FROM clauses to that of an enclosing :class:`.Query` or
        :func:`~.expression.select`.

        The method here accepts mapped classes, :func:`.aliased` constructs,
        and :func:`.mapper` constructs as arguments, which are resolved into
        expression constructs, in addition to appropriate expression
        constructs.

        The correlation arguments are ultimately passed to
        :meth:`.Select.correlate` after coercion to expression constructs.

        The correlation arguments take effect in such cases
        as when :meth:`.Query.from_self` is used, or when
        a subquery as returned by :meth:`.Query.subquery` is
        embedded in another :func:`~.expression.select` construct.

        """
        # None is passed through unchanged; it signals "correlate nothing"
        self._correlate = self._correlate.union(
            _interpret_as_from(s)
            if s is not None else None
            for s in args)
    @_generative()
    def autoflush(self, setting):
        """Return a Query with a specific 'autoflush' setting.

        Note that a Session with autoflush=False will
        not autoflush, even if this flag is set to True at the
        Query level.  Therefore this flag is usually used only
        to disable autoflush for a specific Query.

        """
        self._autoflush = setting
    @_generative()
    def populate_existing(self):
        """Return a :class:`.Query` that will expire and refresh all instances
        as they are loaded, or reused from the current :class:`.Session`.

        :meth:`.populate_existing` does not improve behavior when
        the ORM is used normally - the :class:`.Session` object's usual
        behavior of maintaining a transaction and expiring all attributes
        after rollback or commit handles object state automatically.
        This method is not intended for general use.

        """
        self._populate_existing = True
    @_generative()
    def _with_invoke_all_eagers(self, value):
        """Set the 'invoke all eagers' flag which causes joined- and
        subquery loaders to traverse into already-loaded related objects
        and collections.

        Default is that of :attr:`.Query._invoke_all_eagers`.

        """
        self._invoke_all_eagers = value
    def with_parent(self, instance, property=None):
        """Add filtering criterion that relates the given instance
        to a child object or collection, using its attribute state
        as well as an established :func:`.relationship()`
        configuration.

        The method uses the :func:`.with_parent` function to generate
        the clause, the result of which is passed to :meth:`.Query.filter`.

        Parameters are the same as :func:`.with_parent`, with the exception
        that the given property can be None, in which case a search is
        performed against this :class:`.Query` object's target mapper.

        """
        if property is None:
            # search the instance's mapper for the first relationship
            # targeting this Query's zero mapper
            mapper_zero = inspect(self._mapper_zero()).mapper
            mapper = object_mapper(instance)
            for prop in mapper.iterate_properties:
                if isinstance(prop, properties.RelationshipProperty) and \
                        prop.mapper is mapper_zero:
                    property = prop
                    break
            else:
                # for/else: no relationship matched
                raise sa_exc.InvalidRequestError(
                    "Could not locate a property which relates instances "
                    "of class '%s' to instances of class '%s'" %
                    (
                        self._mapper_zero().class_.__name__,
                        instance.__class__.__name__)
                )
        return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
    @_generative()
    def with_session(self, session):
        """Return a :class:`.Query` that will use the given :class:`.Session`.

        """
        self.session = session
    def from_self(self, *entities):
        """return a Query that selects from this Query's
        SELECT statement.

        :meth:`.Query.from_self` essentially turns the SELECT statement
        into a SELECT of itself.  Given a query such as::

            q = session.query(User).filter(User.name.like('e%'))

        Given the :meth:`.Query.from_self` version::

            q = session.query(User).filter(User.name.like('e%')).from_self()

        This query renders as:

        .. sourcecode:: sql

            SELECT anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1) AS anon_1

        There are lots of cases where :meth:`.Query.from_self` may be useful.
        A simple one is where above, we may want to apply a row LIMIT to
        the set of user objects we query against, and then apply additional
        joins against that row-limited set::

            q = session.query(User).filter(User.name.like('e%')).\\
                limit(5).from_self().\\
                join(User.addresses).filter(Address.email.like('q%'))

        The above query joins to the ``Address`` entity but only against the
        first five results of the ``User`` query:

        .. sourcecode:: sql

            SELECT anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1
            LIMIT :param_1) AS anon_1
            JOIN address ON anon_1.user_id = address.user_id
            WHERE address.email LIKE :email_1

        **Automatic Aliasing**

        Another key behavior of :meth:`.Query.from_self` is that it applies
        **automatic aliasing** to the entities inside the subquery, when
        they are referenced on the outside.  Above, if we continue to
        refer to the ``User`` entity without any additional aliasing applied
        to it, those references wil be in terms of the subquery::

            q = session.query(User).filter(User.name.like('e%')).\\
                limit(5).from_self().\\
                join(User.addresses).filter(Address.email.like('q%')).\\
                order_by(User.name)

        The ORDER BY against ``User.name`` is aliased to be in terms of the
        inner subquery:

        .. sourcecode:: sql

            SELECT anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1
            LIMIT :param_1) AS anon_1
            JOIN address ON anon_1.user_id = address.user_id
            WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name

        The automatic aliasing feature only works in a **limited** way,
        for simple filters and orderings.   More ambitious constructions
        such as referring to the entity in joins should prefer to use
        explicit subquery objects, typically making use of the
        :meth:`.Query.subquery` method to produce an explicit subquery object.
        Always test the structure of queries by viewing the SQL to ensure
        a particular structure does what's expected!

        **Changing the Entities**

        :meth:`.Query.from_self` also includes the ability to modify what
        columns are being queried.   In our example, we want ``User.id``
        to be queried by the inner query, so that we can join to the
        ``Address`` entity on the outside, but we only wanted the outer
        query to return the ``Address.email`` column::

            q = session.query(User).filter(User.name.like('e%')).\\
                limit(5).from_self(Address.email).\\
                join(User.addresses).filter(Address.email.like('q%'))

        yielding:

        .. sourcecode:: sql

            SELECT address.email AS address_email
            FROM (SELECT "user".id AS user_id, "user".name AS user_name
            FROM "user"
            WHERE "user".name LIKE :name_1
            LIMIT :param_1) AS anon_1
            JOIN address ON anon_1.user_id = address.user_id
            WHERE address.email LIKE :email_1

        **Looking out for Inner / Outer Columns**

        Keep in mind that when referring to columns that originate from
        inside the subquery, we need to ensure they are present in the
        columns clause of the subquery itself; this is an ordinary aspect of
        SQL.  For example, if we wanted to load from a joined entity inside
        the subquery using :func:`.contains_eager`, we need to add those
        columns.   Below illustrates a join of ``Address`` to ``User``,
        then a subquery, and then we'd like :func:`.contains_eager` to access
        the ``User`` columns::

            q = session.query(Address).join(Address.user).\\
                filter(User.name.like('e%'))

            q = q.add_entity(User).from_self().\\
                options(contains_eager(Address.user))

        We use :meth:`.Query.add_entity` above **before** we call
        :meth:`.Query.from_self` so that the ``User`` columns are present
        in the inner subquery, so that they are available to the
        :func:`.contains_eager` modifier we are using on the outside,
        producing:

        .. sourcecode:: sql

            SELECT anon_1.address_id AS anon_1_address_id,
                   anon_1.address_email AS anon_1_address_email,
                   anon_1.address_user_id AS anon_1_address_user_id,
                   anon_1.user_id AS anon_1_user_id,
                   anon_1.user_name AS anon_1_user_name
            FROM (
                SELECT address.id AS address_id,
                address.email AS address_email,
                address.user_id AS address_user_id,
                "user".id AS user_id,
                "user".name AS user_name
            FROM address JOIN "user" ON "user".id = address.user_id
            WHERE "user".name LIKE :name_1) AS anon_1
            JOIN "user" ON "user".id = anon_1.address_user_id

        If we didn't call ``add_entity(User)``, but still asked
        :func:`.contains_eager` to load the ``User`` entity, it would be
        forced to add the table on the outside without the correct
        join criteria - note the ``anon1, "user"`` phrase at
        the end:

        .. sourcecode:: sql

            -- incorrect query
            SELECT anon_1.address_id AS anon_1_address_id,
                   anon_1.address_email AS anon_1_address_email,
                   anon_1.address_user_id AS anon_1_address_user_id,
                   "user".id AS user_id,
                   "user".name AS user_name
            FROM (
                SELECT address.id AS address_id,
                address.email AS address_email,
                address.user_id AS address_user_id
            FROM address JOIN "user" ON "user".id = address.user_id
            WHERE "user".name LIKE :name_1) AS anon_1, "user"

        :param \*entities: optional list of entities which will replace
         those being selected.

        """
        # correlate(None) disables correlation so the subquery stands alone
        fromclause = self.with_labels().enable_eagerloads(False).\
            statement.correlate(None)
        q = self._from_selectable(fromclause)
        q._enable_single_crit = False
        q._select_from_entity = self._mapper_zero()
        if entities:
            q._set_entities(entities)
        return q
    @_generative()
    def _set_enable_single_crit(self, val):
        # toggles whether single-table inheritance criterion is applied
        # automatically at compile time
        self._enable_single_crit = val
    @_generative()
    def _from_selectable(self, fromclause):
        """Replace this Query's FROM list with *fromclause*, resetting
        criterion-related state and re-adapting all entities onto the
        new selectable (the core of from_self())."""
        # pop these attributes so the class-level defaults take over
        for attr in (
                '_statement', '_criterion',
                '_order_by', '_group_by',
                '_limit', '_offset',
                '_joinpath', '_joinpoint',
                '_distinct', '_having',
                '_prefixes', '_suffixes'
        ):
            self.__dict__.pop(attr, None)
        self._set_select_from([fromclause], True)
        # this enables clause adaptation for non-ORM
        # expressions.
        self._orm_only_from_obj_alias = False
        old_entities = self._entities
        self._entities = []
        for e in old_entities:
            e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
    @_generative()
    def with_entities(self, *entities):
        """Return a new :class:`.Query` replacing the SELECT list with the
        given entities.

        e.g.::

            # Users, filtered on some arbitrary criterion
            # and then ordered by related email address
            q = session.query(User).\\
                join(User.address).\\
                filter(User.name.like('%ed%')).\\
                order_by(Address.email)

            # given *only* User.id==5, Address.email, and 'q', what
            # would the *next* User in the result be ?
            subq = q.with_entities(Address.email).\\
                order_by(None).\\
                filter(User.id==5).\\
                subquery()
            q = q.join((subq, subq.c.email < Address.email)).\\
                limit(1)

        .. versionadded:: 0.6.5

        """
        self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
    @util.pending_deprecation("0.7",
                              ":meth:`.add_column` is superseded "
                              "by :meth:`.add_columns`",
                              False)
    def add_column(self, column):
        """Add a column expression to the list of result columns to be
        returned.

        Pending deprecation: :meth:`.add_column` will be superseded by
        :meth:`.add_columns`.

        """
        # thin deprecated wrapper around add_columns()
        return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading_relationships` for reference
documentation.
"""
return self._options(False, *args)
    def _conditional_options(self, *args):
        # like options(), but options apply themselves via
        # process_query_conditionally()
        return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
    @_generative()
    def with_hint(self, selectable, text, dialect_name='*'):
        """Add an indexing or other executional context
        hint for the given entity or selectable to
        this :class:`.Query`.

        Functionality is passed straight through to
        :meth:`~sqlalchemy.sql.expression.Select.with_hint`,
        with the addition that ``selectable`` can be a
        :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
        /etc.

        .. seealso::

            :meth:`.Query.with_statement_hint`

        """
        # resolve ORM entities to their underlying selectable;
        # None means a statement-level hint
        if selectable is not None:
            selectable = inspect(selectable).selectable
        self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
    """Add a statement-level hint to this :class:`.Select`.

    Like :meth:`.Select.with_hint` but without an individual table;
    applies to the statement as a whole via
    :meth:`.Select.with_statement_hint`.

    .. versionadded:: 1.0.0

    .. seealso::

        :meth:`.Query.with_hint`
    """
    # a statement hint is a table hint with no selectable
    return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
    """Set non-SQL options which take effect during execution.

    Accepts the same options as
    :meth:`.Connection.execution_options`.  Note that the
    ``stream_results`` option is enabled automatically when
    :meth:`~sqlalchemy.orm.query.Query.yield_per()` is used.
    """
    # union() returns a new immutabledict; existing keys are overridden
    self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
    """Return a new :class:`.Query` with the given "locking mode",
    i.e. the ``FOR UPDATE`` clause.

    .. deprecated:: 0.9.0 superseded by
       :meth:`.Query.with_for_update`.

    :param mode: string locking mode; one of:

     * ``None`` - no lockmode
     * ``'update'`` - ``FOR UPDATE`` (standard SQL, most dialects)
     * ``'update_nowait'`` - ``FOR UPDATE NOWAIT`` (Oracle,
       PostgreSQL 8.1+)
     * ``'read'`` - ``LOCK IN SHARE MODE`` (MySQL) / ``FOR SHARE``
       (PostgreSQL)

    .. seealso::

        :meth:`.Query.with_for_update`
    """
    # translate the legacy string mode into a LockmodeArg
    self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
    """Return a new :class:`.Query` with ``FOR UPDATE`` options.

    Identical in behavior to :meth:`.SelectBase.with_for_update`.
    With no arguments, a plain ``FOR UPDATE`` clause is appended;
    the keyword arguments select backend-specific variants such as
    ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``.  E.g.::

        q = sess.query(User).with_for_update(nowait=True, of=User)

    renders on PostgreSQL as::

        SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT

    .. versionadded:: 0.9.0 supersedes :meth:`.Query.with_lockmode`.

    .. seealso::

        :meth:`.GenerativeSelect.with_for_update` - Core level method
        with full argument and behavioral description.
    """
    self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
    """Add values for bind parameters which may have been specified
    in filter().

    Parameters may be passed as \**kwargs, or as a single dictionary
    positional argument; the latter exists because parameter
    dictionaries may contain unicode keys which \**kwargs cannot
    express.
    """
    # guard clause: at most one positional argument is permitted
    if len(args) > 1:
        raise sa_exc.ArgumentError(
            "params() takes zero or one positional argument, "
            "which is a dictionary.")
    if args:
        kwargs.update(args[0])
    # copy before update so the parent query's params are untouched
    self._params = self._params.copy()
    self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
    """Apply the given filtering criterion to a copy of this
    :class:`.Query`, using SQL expressions.

    e.g.::

        session.query(MyClass).filter(MyClass.name == 'some name')

    Multiple criteria are joined together via :func:`.and_`.  Each
    criterion is any SQL expression applicable to the WHERE clause
    of a select; strings are coerced via :func:`.text`.

    .. seealso::

        :meth:`.Query.filter_by` - filter on keyword expressions.
    """
    for crit in list(criterion):
        # coerce strings to text() constructs, then adapt to any
        # aliasing currently in effect
        crit = expression._expression_literal_as_text(crit)
        crit = self._adapt_clause(crit, True, True)
        if self._criterion is None:
            self._criterion = crit
        else:
            # AND the new criterion onto the existing one
            self._criterion = self._criterion & crit
def filter_by(self, **kwargs):
    """Apply the given filtering criterion to a copy of this
    :class:`.Query`, using keyword expressions.

    e.g.::

        session.query(MyClass).filter_by(name = 'some name')

    Multiple criteria are joined together via :func:`.and_`.  The
    keyword names are resolved against the primary entity of the
    query, or the last entity targeted by :meth:`.Query.join`.

    .. seealso::

        :meth:`.Query.filter` - filter on SQL expressions.
    """
    anchor = self._joinpoint_zero()
    conditions = [
        _entity_descriptor(anchor, key) == value
        for key, value in kwargs.items()
    ]
    return self.filter(sql.and_(*conditions))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
    """apply one or more ORDER BY criterion to the query and return
    the newly resulting ``Query``

    All existing ORDER BY settings can be suppressed by
    passing ``None`` - this will suppress any ORDER BY configured
    on mappers as well.

    Alternatively, an existing ORDER BY setting on the Query
    object can be entirely cancelled by passing ``False``
    as the value - use this before calling methods where
    an ORDER BY is invalid.
    """
    # False and None are sentinel values, only recognized when passed
    # as the sole argument.
    if len(criterion) == 1:
        if criterion[0] is False:
            # cancel: remove the instance-level setting entirely so
            # the attribute falls back to the class default.  Checks
            # __dict__ directly since _order_by may only exist on the
            # class.
            if '_order_by' in self.__dict__:
                del self._order_by
            return
        if criterion[0] is None:
            # suppress: explicit None disables ORDER BY including any
            # configured on mappers.
            self._order_by = None
            return
    # adapt the criteria to any aliasing currently in effect
    criterion = self._adapt_col_list(criterion)
    if self._order_by is False or self._order_by is None:
        self._order_by = criterion
    else:
        # append to existing ORDER BY criteria
        self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
    """Apply one or more GROUP BY criterion to the query and return
    the newly resulting :class:`.Query`."""
    # expand ORM entities into their component columns, flattened
    # into a single list
    flattened = list(chain(*[_orm_columns(c) for c in criterion]))
    flattened = self._adapt_col_list(flattened)
    if self._group_by is False:
        self._group_by = flattened
    else:
        # append to any GROUP BY criteria already present
        self._group_by = self._group_by + flattened
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
    """Apply a HAVING criterion to the query and return the newly
    resulting :class:`.Query`.

    Used in conjunction with :meth:`~.Query.group_by`; HAVING makes
    it possible to filter on aggregate functions such as COUNT, SUM,
    AVG, MAX and MIN, e.g.::

        q = session.query(User.id).\\
                    join(User.addresses).\\
                    group_by(User.id).\\
                    having(func.count(Address.id) > 2)
    """
    # coerce strings to text() constructs
    criterion = expression._expression_literal_as_text(criterion)
    if criterion is not None and \
            not isinstance(criterion, sql.ClauseElement):
        raise sa_exc.ArgumentError(
            "having() argument must be of type "
            "sqlalchemy.sql.ClauseElement or string")
    criterion = self._adapt_clause(criterion, True, True)
    if self._having is None:
        self._having = criterion
    else:
        # AND onto the existing HAVING criterion
        self._having = self._having & criterion
def union(self, *q):
    """Produce a UNION of this Query against one or more queries.

    e.g.::

        q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
        q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')

        q3 = q1.union(q2)

    Multiple Query objects control the level of nesting:
    ``x.union(y).union(z).all()`` nests on each call, whereas
    ``x.union(y, z).all()`` produces a single flat UNION.

    Note that many database backends do not allow ORDER BY to be
    rendered on a query inside UNION, EXCEPT, etc.  To disable all
    ORDER BY clauses, including those configured on mappers, issue
    ``query.order_by(None)``.
    """
    queries = [self] + list(q)
    return self._from_selectable(expression.union(*queries))
def union_all(self, *q):
    """Produce a UNION ALL of this Query against one or more queries.

    Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`;
    see that method for usage examples.
    """
    queries = [self] + list(q)
    return self._from_selectable(expression.union_all(*queries))
def intersect(self, *q):
    """Produce an INTERSECT of this Query against one or more queries.

    Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`;
    see that method for usage examples.
    """
    queries = [self] + list(q)
    return self._from_selectable(expression.intersect(*queries))
def intersect_all(self, *q):
    """Produce an INTERSECT ALL of this Query against one or more
    queries.

    Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`;
    see that method for usage examples.
    """
    queries = [self] + list(q)
    return self._from_selectable(expression.intersect_all(*queries))
def except_(self, *q):
    """Produce an EXCEPT of this Query against one or more queries.

    Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`;
    see that method for usage examples.
    """
    queries = [self] + list(q)
    return self._from_selectable(expression.except_(*queries))
def except_all(self, *q):
    """Produce an EXCEPT ALL of this Query against one or more
    queries.

    Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`;
    see that method for usage examples.
    """
    queries = [self] + list(q)
    return self._from_selectable(expression.except_all(*queries))
def join(self, *props, **kwargs):
    """Create a SQL JOIN against this :class:`.Query` object's
    criterion and apply generatively, returning the newly resulting
    :class:`.Query`.

    **Simple Relationship Joins**

    Given a relationship ``User.addresses``, the most common usage
    is to join along that relationship attribute::

        q = session.query(User).join(User.addresses)

    For a single-entity query, a string relationship name also
    works (``.join("addresses")``), and multiple "on clause"
    arguments produce a chain of joins::

        q = session.query(User).join("orders", "items", "keywords")

    **Joins to a Target Entity or Selectable**

    A mapped entity or core selectable may be given as the target,
    in which case the JOIN is derived from the natural foreign key
    relationship between the two entities::

        q = session.query(User).join(Address)

    This form raises an error if there is no foreign key between
    the two entities, or more than one linkage between them.

    **Joins to a Target with an ON Clause**

    Both the target and the ON clause may be passed explicitly,
    e.g. with an :func:`~sqlalchemy.orm.aliased` entity::

        a_alias = aliased(Address)
        q = session.query(User).\\
                join(User.addresses).\\
                join(a_alias, User.addresses)

    Arbitrary SQL expressions are also accepted as the ON clause::

        q = session.query(User).join(Address, User.id==Address.user_id)

    .. versionchanged:: 0.7
        The two-argument form no longer requires a tuple, though
        tuples of the form ``(target, onclause)`` remain accepted
        when multiple join conditions are passed in one call.

    **Aliasing and Join Point Control**

    ``aliased=True`` anonymously aliases the join target, adapting
    subsequent :meth:`.Query.filter` calls to the last join point;
    ``from_joinpoint=True`` continues joining from the most recent
    join target rather than the original FROM clauses.
    :meth:`~.Query.reset_joinpoint` restores the original entity.
    :meth:`~.Query.select_from` controls the left side of the join.

    :param \*props: one or more join conditions: a
     relationship-bound attribute or string relationship name, a
     target entity, or a ``(target, onclause)`` tuple.  The special
     two-argument form ``target, onclause`` is also accepted.

    :param aliased=False: if True, anonymously alias the JOIN
     target; subsequent criteria are adapted to the alias until
     :meth:`~.Query.reset_joinpoint` is called.

    :param isouter=False: if True, use a LEFT OUTER JOIN, as if
     :meth:`.Query.outerjoin` had been called.  Mirrors the same
     flag on :meth:`.FromClause.join`.

     .. versionadded:: 1.0.0

    :param from_joinpoint=False: with ``aliased=True``, join from
     the most recent joined target instead of the original FROM
     clauses of the query.

    .. seealso::

        :ref:`ormtutorial_joins` in the ORM tutorial.

        :ref:`inheritance_toplevel` for details on how
        :meth:`~.Query.join` is used for inheritance relationships.

        :func:`.orm.join` - a standalone ORM-level join function,
        used internally by :meth:`.Query.join`.
    """
    aliased = kwargs.pop('aliased', False)
    from_joinpoint = kwargs.pop('from_joinpoint', False)
    isouter = kwargs.pop('isouter', False)
    if kwargs:
        raise TypeError("unknown arguments: %s" %
                        ', '.join(sorted(kwargs)))
    return self._join(
        props,
        outerjoin=isouter, create_aliases=aliased,
        from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
    """Create a left outer join against this ``Query`` object's
    criterion and apply generatively, returning the newly resulting
    ``Query``.

    Usage is the same as the ``join()`` method.
    """
    aliased = kwargs.pop('aliased', False)
    from_joinpoint = kwargs.pop('from_joinpoint', False)
    if kwargs:
        raise TypeError("unknown arguments: %s" %
                        ', '.join(sorted(kwargs)))
    return self._join(
        props,
        outerjoin=True, create_aliases=aliased,
        from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
    """Install ``jp`` as the current join point and rebuild the
    ancestor chain of ``_joinpath`` to reference it.

    Each ancestor dict along the 'prev' chain is copied before
    being re-pointed at its child, so no dict potentially shared
    with another Query instance is mutated in place.
    """
    self._joinpoint = jp
    # copy backwards to the root of the _joinpath
    # dict, so that no existing dict in the path is mutated
    while 'prev' in jp:
        f, prev = jp['prev']
        prev = prev.copy()
        prev[f] = jp
        # re-anchor the child's back-reference at the fresh copy
        jp['prev'] = (f, prev)
        jp = prev
    # the last copied ancestor is the new root of the join path
    self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
    """consumes arguments from join() or outerjoin(), places them into a
    consistent format with which to form the actual JOIN constructs.

    Each argument is normalized into a (right_entity, onclause)
    pair, redundant relationship joins are skipped, and
    _join_left_to_right() is invoked once per pair.
    """
    if not from_joinpoint:
        # a fresh join() starts back at the base FROM entities
        self._reset_joinpoint()
    if len(keys) == 2 and \
            isinstance(keys[0], (expression.FromClause,
                                 type, AliasedClass)) and \
            isinstance(keys[1], (str, expression.ClauseElement,
                                 interfaces.PropComparator)):
        # detect 2-arg form of join and
        # convert to a tuple.
        keys = (keys,)
    keylist = util.to_list(keys)
    for idx, arg1 in enumerate(keylist):
        if isinstance(arg1, tuple):
            # "tuple" form of join, multiple
            # tuples are accepted as well. The simpler
            # "2-arg" form is preferred. May deprecate
            # the "tuple" usage.
            arg1, arg2 = arg1
        else:
            arg2 = None
        # determine onclause/right_entity. there
        # is a little bit of legacy behavior still at work here
        # which means they might be in either order. may possibly
        # lock this down to (right_entity, onclause) in 0.6.
        if isinstance(
                arg1, (interfaces.PropComparator, util.string_types)):
            right_entity, onclause = arg2, arg1
        else:
            right_entity, onclause = arg1, arg2
        left_entity = prop = None
        # capture any of_type() qualifier on a relationship comparator
        if isinstance(onclause, interfaces.PropComparator):
            of_type = getattr(onclause, '_of_type', None)
        else:
            of_type = None
        if isinstance(onclause, util.string_types):
            # string relationship name: resolve against the current
            # join point entity
            left_entity = self._joinpoint_zero()
            descriptor = _entity_descriptor(left_entity, onclause)
            onclause = descriptor
        # check for q.join(Class.propname, from_joinpoint=True)
        # and Class is that of the current joinpoint
        elif from_joinpoint and \
                isinstance(onclause, interfaces.PropComparator):
            left_entity = onclause._parententity
            info = inspect(self._joinpoint_zero())
            left_mapper, left_selectable, left_is_aliased = \
                getattr(info, 'mapper', None), \
                info.selectable, \
                getattr(info, 'is_aliased_class', None)
            if left_mapper is left_entity:
                # re-resolve the descriptor against the (possibly
                # aliased) join point so adaption applies
                left_entity = self._joinpoint_zero()
                descriptor = _entity_descriptor(left_entity,
                                                onclause.key)
                onclause = descriptor
        if isinstance(onclause, interfaces.PropComparator):
            # relationship-based join: derive the right entity from
            # the relationship if it wasn't given explicitly
            if right_entity is None:
                if of_type:
                    right_entity = of_type
                else:
                    right_entity = onclause.property.mapper
            left_entity = onclause._parententity
            prop = onclause.property
            if not isinstance(onclause, attributes.QueryableAttribute):
                onclause = prop
            if not create_aliases:
                # check for this path already present.
                # don't render in that case.
                edge = (left_entity, right_entity, prop.key)
                if edge in self._joinpoint:
                    # The child's prev reference might be stale --
                    # it could point to a parent older than the
                    # current joinpoint. If this is the case,
                    # then we need to update it and then fix the
                    # tree's spine with _update_joinpoint. Copy
                    # and then mutate the child, which might be
                    # shared by a different query object.
                    jp = self._joinpoint[edge].copy()
                    jp['prev'] = (edge, self._joinpoint)
                    self._update_joinpoint(jp)
                    if idx == len(keylist) - 1:
                        util.warn(
                            "Pathed join target %s has already "
                            "been joined to; skipping" % prop)
                    continue
        elif onclause is not None and right_entity is None:
            # TODO: no coverage here
            raise NotImplementedError("query.join(a==b) not supported.")
        self._join_left_to_right(
            left_entity,
            right_entity, onclause,
            outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
                        onclause, outerjoin, create_aliases, prop):
    """append a JOIN to the query's from clause.

    Determines the left side when not given, guards against
    self-joins without aliasing, prepares/aliases the right side,
    records the join point, and delegates the actual FROM clause
    manipulation to _join_to_left().
    """
    # copy before mutation; adapters may be added by
    # _prepare_right_side / _mapper_loads_polymorphically_with
    self._polymorphic_adapters = self._polymorphic_adapters.copy()
    if left is None:
        # infer the left side: explicit FROM objects first, then the
        # query's primary entity
        if self._from_obj:
            left = self._from_obj[0]
        elif self._entities:
            left = self._entities[0].entity_zero_or_selectable
    if left is None:
        raise sa_exc.InvalidRequestError(
            "Don't know how to join from %s; please use "
            "select_from() to establish the left "
            "entity/selectable of this join" % self._entities[0])
    if left is right and \
            not create_aliases:
        raise sa_exc.InvalidRequestError(
            "Can't construct a join from %s to %s, they "
            "are the same entity" %
            (left, right))
    l_info = inspect(left)
    r_info = inspect(right)
    overlap = False
    if not create_aliases:
        right_mapper = getattr(r_info, "mapper", None)
        # if the target is a joined inheritance mapping,
        # be more liberal about auto-aliasing.
        if right_mapper and (
                right_mapper.with_polymorphic or
                isinstance(right_mapper.mapped_table, expression.Join)
        ):
            # detect whether the right selectable shares tables with
            # anything already on the left / in the FROM list
            for from_obj in self._from_obj or [l_info.selectable]:
                if sql_util.selectables_overlap(
                        l_info.selectable, from_obj) and \
                        sql_util.selectables_overlap(
                            from_obj, r_info.selectable):
                    overlap = True
                    break
    if (overlap or not create_aliases) and \
            l_info.selectable is r_info.selectable:
        raise sa_exc.InvalidRequestError(
            "Can't join table/selectable '%s' to itself" %
            l_info.selectable)
    right, onclause = self._prepare_right_side(
        r_info, right, onclause,
        create_aliases,
        prop, overlap)
    # if joining on a MapperProperty path,
    # track the path to prevent redundant joins
    if not create_aliases and prop:
        self._update_joinpoint({
            '_joinpoint_entity': right,
            'prev': ((left, right, prop.key), self._joinpoint)
        })
    else:
        self._joinpoint = {'_joinpoint_entity': right}
    self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
                        prop, overlap):
    """Validate and, where required, alias the right side of a join.

    Returns the possibly-aliased ``right`` target together with the
    possibly-adapted ``onclause``.  Also installs filter/result-set
    adapters on the query when an alias was generated.
    """
    info = r_info
    right_mapper, right_selectable, right_is_aliased = \
        getattr(info, 'mapper', None), \
        info.selectable, \
        getattr(info, 'is_aliased_class', False)
    if right_mapper:
        # record the mapped target for later entity bookkeeping
        self._join_entities += (info, )
    if right_mapper and prop and \
            not right_mapper.common_parent(prop.mapper):
        # relationship join: the explicit target must be compatible
        # with the relationship's own mapper hierarchy
        raise sa_exc.InvalidRequestError(
            "Join target %s does not correspond to "
            "the right side of join condition %s" % (right, onclause)
        )
    if not right_mapper and prop:
        right_mapper = prop.mapper
    need_adapter = False
    if right_mapper and right is right_selectable:
        # a raw selectable target for a mapped entity: it must derive
        # from the entity's mapped table
        if not right_selectable.is_derived_from(
                right_mapper.mapped_table):
            raise sa_exc.InvalidRequestError(
                "Selectable '%s' is not derived from '%s'" %
                (right_selectable.description,
                 right_mapper.mapped_table.description))
        if isinstance(right_selectable, expression.SelectBase):
            # TODO: this isn't even covered now!
            right_selectable = right_selectable.alias()
            need_adapter = True
        right = aliased(right_mapper, right_selectable)
    # aliasing is also forced for polymorphic targets and for the
    # overlap condition detected by the caller
    aliased_entity = right_mapper and \
        not right_is_aliased and \
        (
            right_mapper.with_polymorphic and isinstance(
                right_mapper._with_polymorphic_selectable,
                expression.Alias)
            or
            overlap  # test for overlap:
            # orm/inheritance/relationships.py
            # SelfReferentialM2MTest
        )
    if not need_adapter and (create_aliases or aliased_entity):
        right = aliased(right, flat=True)
        need_adapter = True
    # if an alias() of the right side was generated here,
    # apply an adapter to all subsequent filter() calls
    # until reset_joinpoint() is called.
    if need_adapter:
        self._filter_aliases = ORMAdapter(
            right,
            equivalents=right_mapper and
            right_mapper._equivalent_columns or {},
            chain_to=self._filter_aliases)
    # if the onclause is a ClauseElement, adapt it with any
    # adapters that are in place right now
    if isinstance(onclause, expression.ClauseElement):
        onclause = self._adapt_clause(onclause, True, True)
    # if an alias() on the right side was generated,
    # which is intended to wrap a the right side in a subquery,
    # ensure that columns retrieved from this target in the result
    # set are also adapted.
    if aliased_entity and not create_aliases:
        self._mapper_loads_polymorphically_with(
            right_mapper,
            ORMAdapter(
                right,
                equivalents=right_mapper._equivalent_columns
            )
        )
    return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
    """Construct the actual JOIN clause against the left side and
    splice it into the query's FROM list.

    If an existing FROM object contains the left selectable, the
    JOIN replaces it in place; otherwise the JOIN is appended as a
    new FROM element.
    """
    info = l_info
    left_mapper = getattr(info, 'mapper', None)
    left_selectable = info.selectable
    if self._from_obj:
        # look for an explicit FROM element that already contains
        # the left selectable and join onto it in place
        replace_clause_index, clause = sql_util.find_join_source(
            self._from_obj,
            left_selectable)
        if clause is not None:
            try:
                clause = orm_join(clause,
                                  right,
                                  onclause, isouter=outerjoin)
            except sa_exc.ArgumentError as ae:
                raise sa_exc.InvalidRequestError(
                    "Could not find a FROM clause to join from. "
                    "Tried joining to %s, but got: %s" % (right, ae))
            # replace the matched FROM element with the new JOIN
            self._from_obj = \
                self._from_obj[:replace_clause_index] + \
                (clause, ) + \
                self._from_obj[replace_clause_index + 1:]
            return
    if left_mapper:
        # prefer the selectable of the query entity corresponding to
        # the left side; fall back to the left entity itself
        for ent in self._entities:
            if ent.corresponds_to(left):
                clause = ent.selectable
                break
        else:
            clause = left
    else:
        clause = left_selectable
    assert clause is not None
    try:
        clause = orm_join(clause, right, onclause, isouter=outerjoin)
    except sa_exc.ArgumentError as ae:
        raise sa_exc.InvalidRequestError(
            "Could not find a FROM clause to join from. "
            "Tried joining to %s, but got: %s" % (right, ae))
    # append the completed JOIN as a new FROM element
    self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
    """Restore the join point to the root of the join path and drop
    any filter-adapting aliases."""
    self._joinpoint = self._joinpath
    self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
    """Return a new :class:`.Query` where the "join point" has been
    reset back to the base FROM entities of the query.

    Usually used with the ``aliased=True`` feature of
    :meth:`~.Query.join`; see the example there.
    """
    self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
    """Set the FROM clause of this :class:`.Query` explicitly.

    Often used with :meth:`.Query.join` to control which entity
    forms the "left" side of the join: the given entity or
    selectable replaces the "left edge" of subsequent
    :meth:`~.Query.join` calls when no join point is otherwise
    established.  A typical example::

        q = session.query(Address).select_from(User).\\
            join(User.addresses).\\
            filter(User.name == 'ed')

    which produces SQL equivalent to::

        SELECT address.* FROM user
        JOIN address ON user.id=address.user_id
        WHERE user.name = :name_1

    :param \*from_obj: collection of one or more entities to apply
     to the FROM clause: mapped classes, :class:`.AliasedClass`
     objects, :class:`.Mapper` objects, or core
     :class:`.FromClause` elements such as subqueries.

    .. versionchanged:: 0.9
        No longer applies the given FROM object as the selectable
        from which matching entities select; that behavior now
        lives in :meth:`.select_entity_from`.

    .. seealso::

        :meth:`~.Query.join`

        :meth:`.Query.select_entity_from`
    """
    # False: do not adapt entities to the given FROM objects
    self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
    """Set the FROM clause of this :class:`.Query` to a core
    selectable, applying it as a replacement FROM clause for
    corresponding mapped entities.

    Like :meth:`.Query.select_from` this sets the FROM clause, but
    additionally adapts the selected entities (and criteria that
    reference them) to select from the given selectable instead of
    their base tables.  ``from_obj`` must be a
    :class:`.FromClause`, e.g. a :func:`.select` or
    :class:`.Alias` construct::

        select_stmt = select([User]).where(User.id == 7)

        q = session.query(User).\\
            select_entity_from(select_stmt).\\
            filter(User.name == 'ed')

    renders as::

        SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
        FROM (SELECT "user".id AS id, "user".name AS name
        FROM "user"
        WHERE "user".id = :id_1) AS anon_1
        WHERE anon_1.name = :name_1

    Note the WHERE criterion is also "adapted" so that the
    ``anon_1`` subquery replaces all references to the ``user``
    table.  By contrast, :meth:`.Query.select_from` (as of 0.9)
    leaves existing entities unadapted, rendering the base table
    and the given selectable as separate FROM elements.

    In modern usage a similar result is achieved with
    :func:`.aliased`::

        select_stmt = select([User]).where(User.id == 7)
        user_from_select = aliased(User, select_stmt.alias())

        q = session.query(user_from_select)

    :param from_obj: a :class:`.FromClause` object that will replace
     the FROM clause of this :class:`.Query`.

    .. seealso::

        :meth:`.Query.select_from`

    .. versionadded:: 0.8
        :meth:`.Query.select_entity_from` was added to specify
        the specific behavior of entity replacement, however
        the :meth:`.Query.select_from` maintains this behavior
        as well until 0.9.
    """
    # True: adapt the selected entities to the given FROM object
    self._set_select_from([from_obj], True)
def __getitem__(self, item):
    """Index / slice access to query results.

    Slices with non-negative bounds are translated to LIMIT/OFFSET
    via :meth:`.Query.slice`; negative bounds and ``[-1]`` fall back
    to fetching the full result list and slicing in Python.
    """
    if isinstance(item, slice):
        start, stop, step = util.decode_slice(item)

        # an empty or inverted integer range needs no query at all
        if isinstance(stop, int) and \
                isinstance(start, int) and \
                stop - start <= 0:
            return []

        # perhaps we should execute a count() here so that we
        # can still use LIMIT/OFFSET ?
        elif (isinstance(start, int) and start < 0) \
                or (isinstance(stop, int) and stop < 0):
            # negative bounds can't be expressed as LIMIT/OFFSET;
            # materialize everything and slice in Python
            return list(self)[item]

        res = self.slice(start, stop)
        if step is not None:
            # LIMIT/OFFSET covered the range; apply the step in Python
            return list(res)[None:None:item.step]
        else:
            return list(res)
    else:
        if item == -1:
            return list(self)[-1]
        else:
            # single index: fetch a one-row window; raises IndexError
            # if the row does not exist
            return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
    """Apply LIMIT/OFFSET to the ``Query`` based on a range and
    return the newly resulting ``Query``.
    """
    has_start = start is not None
    has_stop = stop is not None
    if has_start and has_stop:
        # both bounds present: offset accumulates, limit is the
        # window size
        self._offset = (self._offset or 0) + start
        self._limit = stop - start
    elif has_stop:
        self._limit = stop
    elif has_start:
        self._offset = (self._offset or 0) + start
        # an accumulated offset of zero is the same as no offset
        if self._offset == 0:
            self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
    """Apply a ``LIMIT`` to the query and return the newly resulting
    ``Query``.
    """
    # generative: mutates the cloned Query produced by the decorator
    self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
    """Apply an ``OFFSET`` to the query and return the newly resulting
    ``Query``.
    """
    # generative: mutates the cloned Query produced by the decorator
    self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
    """Apply a ``DISTINCT`` to the query and return the newly resulting
    ``Query``.

    .. note::

        The :meth:`.distinct` call includes logic that will automatically
        add columns from the ORDER BY of the query to the columns
        clause of the SELECT statement, to satisfy the common need
        of the database backend that ORDER BY columns be part of the
        SELECT list when DISTINCT is used.  These columns *are not*
        added to the list of columns actually fetched by the
        :class:`.Query`, however, so would not affect results.
        The columns are passed through when using the
        :attr:`.Query.statement` accessor, however.

    :param \*criterion: optional column expressions.  When present,
     the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
     construct.

    """
    if not criterion:
        # plain DISTINCT
        self._distinct = True
    else:
        criterion = self._adapt_col_list(criterion)
        # accumulate onto an existing DISTINCT ON column list,
        # else start a new one
        if isinstance(self._distinct, list):
            self._distinct += criterion
        else:
            self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
    """Apply the prefixes to the query and return the newly resulting
    ``Query``.

    :param \*prefixes: optional prefixes, typically strings,
     not using any commas.  In particular is useful for MySQL keywords,
     e.g.::

        query = sess.query(User.name).\\
            prefix_with('HIGH_PRIORITY').\\
            prefix_with('SQL_SMALL_RESULT', 'ALL')

    Would render::

        SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
        FROM users

    .. seealso::

        :meth:`.HasPrefixes.prefix_with`

    """
    # accumulate across repeated calls
    if not self._prefixes:
        self._prefixes = prefixes
    else:
        self._prefixes += prefixes
@_generative()
def suffix_with(self, *suffixes):
    """Apply the suffix to the query and return the newly resulting
    ``Query``.

    :param \*suffixes: optional suffixes, typically strings,
     not using any commas.

    .. seealso::

        :meth:`.Query.prefix_with`

        :meth:`.HasSuffixes.suffix_with`

    """
    # accumulate across repeated calls
    if not self._suffixes:
        self._suffixes = suffixes
    else:
        self._suffixes += suffixes
def all(self):
    """Return the results represented by this ``Query`` as a list.

    This results in an execution of the underlying query.

    """
    # iterating self executes the query (see __iter__)
    return [row for row in self]
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
    """Execute the given SELECT statement and return results.

    This method bypasses all internal statement compilation; the
    statement is executed without modification.  The statement is
    typically a :func:`~.expression.text` or
    :func:`~.expression.select` construct returning the set of
    columns appropriate to the entity class represented by this
    :class:`.Query`.

    """
    statement = expression._expression_literal_as_text(statement)

    accepted = (
        expression.TextClause,
        expression.SelectBase,
    )
    if not isinstance(statement, accepted):
        raise sa_exc.ArgumentError(
            "from_statement accepts text(), select(), "
            "and union() objects only.")

    self._statement = statement
def first(self):
    """Return the first result of this ``Query`` or
    None if the result doesn't contain any row.

    Applies a limit of one within the generated SQL, so that
    only one primary entity row is generated on the server side
    (note this may consist of multiple result rows if join-loaded
    collections are present).

    This results in an execution of the underlying query.

    """
    if self._statement is not None:
        # a from_statement() query: LIMIT cannot be injected,
        # so trim the fetched results instead
        rows = list(self)[0:1]
    else:
        rows = list(self[0:1])
    return rows[0] if rows else None
def one_or_none(self):
    """Return at most one result or raise an exception.

    Returns ``None`` if the query selects no rows.  Raises
    ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object
    identities are returned, or if multiple rows are returned for a
    query that returns only scalar values as opposed to full
    identity-mapped entities.

    This results in an execution of the underlying query.

    .. seealso::

        :meth:`.Query.first`

        :meth:`.Query.one`

    """
    results = list(self)
    count = len(results)
    if count == 0:
        return None
    if count == 1:
        return results[0]
    raise orm_exc.MultipleResultsFound(
        "Multiple rows were found for one_or_none()")
def one(self):
    """Return exactly one result or raise an exception.

    Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
    no rows.  Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if
    multiple object identities are returned, or if multiple rows are
    returned for a query that returns only scalar values as opposed
    to full identity-mapped entities.

    This results in an execution of the underlying query.

    .. seealso::

        :meth:`.Query.first`

        :meth:`.Query.one_or_none`

    """
    results = list(self)
    count = len(results)
    if count == 0:
        raise orm_exc.NoResultFound("No row was found for one()")
    if count == 1:
        return results[0]
    raise orm_exc.MultipleResultsFound(
        "Multiple rows were found for one()")
def scalar(self):
    """Return the first element of the first result or None
    if no rows present.  If multiple rows are returned,
    raises MultipleResultsFound.

    >>> session.query(Item).scalar()
    <Item>
    >>> session.query(Item.id).scalar()
    1
    >>> session.query(Item.id).filter(Item.id < 0).scalar()
    None
    >>> session.query(Item.id, Item.name).scalar()
    1
    >>> session.query(func.count(Parent.id)).scalar()
    20

    This results in an execution of the underlying query.

    """
    try:
        row = self.one()
    except orm_exc.NoResultFound:
        return None
    # a multi-column result arrives as a tuple; return its first element
    if isinstance(row, tuple):
        return row[0]
    return row
def __iter__(self):
    """Compile and execute this query, returning an iterator of rows."""
    ctx = self._compile_context()
    ctx.statement.use_labels = True
    # invoke session autoflush unless disabled for this query or
    # populate_existing mode is in effect
    if self._autoflush and not self._populate_existing:
        self.session._autoflush()
    return self._execute_and_instances(ctx)
def _connection_from_session(self, **kw):
    """Return a connection from our Session, applying any
    per-query execution options set on this Query."""
    connection = self.session.connection(**kw)
    if self._execution_options:
        connection = connection.execution_options(
            **self._execution_options)
    return connection
def _execute_and_instances(self, querycontext):
    """Execute the compiled statement and return ORM instances."""
    connection = self._connection_from_session(
        mapper=self._bind_mapper(),
        clause=querycontext.statement,
        close_with_result=True)

    cursor_result = connection.execute(
        querycontext.statement, self._params)
    return loading.instances(
        querycontext.query, cursor_result, querycontext)
@property
def column_descriptions(self):
    """Return metadata about the columns which would be
    returned by this :class:`.Query`.

    Format is a list of dictionaries::

        user_alias = aliased(User, name='user2')
        q = sess.query(User, User.id, user_alias)

        # this expression:
        q.column_descriptions

        # would return:
        [
            {
                'name':'User',
                'type':User,
                'aliased':False,
                'expr':User,
                'entity': User
            },
            {
                'name':'id',
                'type':Integer(),
                'aliased':False,
                'expr':User.id,
                'entity': User
            },
            {
                'name':'user2',
                'type':User,
                'aliased':True,
                'expr':user_alias,
                'entity': user_alias
            }
        ]

    """
    return [
        {
            'name': ent._label_name,
            'type': ent.type,
            'aliased': getattr(insp_ent, 'is_aliased_class', False),
            'expr': ent.expr,
            'entity':
                getattr(insp_ent, "entity", None)
                if ent.entity_zero is not None
                and not insp_ent.is_clause_element
                else None
        }
        # pair each query entity with the inspection of its
        # entity_zero, or None when there is no mapped entity
        for ent, insp_ent in [
            (
                _ent,
                (inspect(_ent.entity_zero)
                 if _ent.entity_zero is not None else None)
            )
            for _ent in self._entities
        ]
    ]
def instances(self, cursor, __context=None):
    """Given a ResultProxy cursor as returned by connection.execute(),
    return an ORM result as an iterator.

    e.g.::

        result = engine.execute("select * from users")
        for u in session.query(User).instances(result):
            print u

    """
    # build a fresh QueryContext when the caller did not supply one
    ctx = __context if __context is not None else QueryContext(self)
    return loading.instances(self, cursor, ctx)
def merge_result(self, iterator, load=True):
    """Merge a result into this :class:`.Query` object's Session.

    Given an iterator returned by a :class:`.Query` of the same structure
    as this one, return an identical iterator of results, with all mapped
    instances merged into the session using :meth:`.Session.merge`.  This
    is an optimized method which will merge all mapped instances,
    preserving the structure of the result rows and unmapped columns with
    less method overhead than that of calling :meth:`.Session.merge`
    explicitly for each value.

    The structure of the results is determined based on the column list of
    this :class:`.Query` - if these do not correspond, unchecked errors
    will occur.

    The 'load' argument is the same as that of :meth:`.Session.merge`.

    For an example of how :meth:`~.Query.merge_result` is used, see
    the source code for the example :ref:`examples_caching`, where
    :meth:`~.Query.merge_result` is used to efficiently restore state
    from a cache back into a target :class:`.Session`.

    """
    return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
    """Dictionary of keyword arguments (LIMIT/OFFSET/DISTINCT etc.)
    to be passed to a core ``select()`` construct."""
    return dict(
        limit=self._limit,
        offset=self._offset,
        distinct=self._distinct,
        prefixes=self._prefixes,
        suffixes=self._suffixes,
        group_by=self._group_by or None,
        having=self._having,
    )
@property
def _should_nest_selectable(self):
    """Whether LIMIT/OFFSET/DISTINCT requires wrapping the statement
    in a subquery (see _compound_eager_statement)."""
    opts = self._select_args
    return (
        opts.get('limit') is not None
        or opts.get('offset') is not None
        or opts.get('distinct', False)
    )
def exists(self):
    """A convenience method that turns a query into an EXISTS subquery
    of the form EXISTS (SELECT 1 FROM ... WHERE ...).

    e.g.::

        q = session.query(User).filter(User.name == 'fred')
        session.query(q.exists())

    Producing SQL similar to::

        SELECT EXISTS (
            SELECT 1 FROM users WHERE users.name = :name_1
        ) AS anon_1

    The EXISTS construct is usually used in the WHERE clause::

        session.query(User.id).filter(q.exists()).scalar()

    Note that some databases such as SQL Server don't allow an
    EXISTS expression to be present in the columns clause of a
    SELECT.  To select a simple boolean value based on the exists
    as a WHERE, use :func:`.literal`::

        from sqlalchemy import literal

        session.query(literal(True)).filter(q.exists()).scalar()

    .. versionadded:: 0.8.1

    """
    # .add_columns() for the case that we are a query().select_from(X),
    # so that ".statement" can be produced (#2995) but also without
    # omitting the FROM clause from a query(X) (#2818);
    # .with_only_columns() after we have a core select() so that
    # we get just "SELECT 1" without any entities.
    return sql.exists(self.add_columns('1').with_labels().
                      statement.with_only_columns([1]))
def count(self):
    """Return a count of rows this Query would return.

    This generates the SQL for this Query as follows::

        SELECT count(1) AS count_1 FROM (
            SELECT <rest of query follows...>
        ) AS anon_1

    .. versionchanged:: 0.7
        The above scheme is newly refined as of 0.7b3.

    For fine grained control over specific columns
    to count, to skip the usage of a subquery or
    otherwise control of the FROM clause,
    or to use other aggregate functions,
    use :attr:`~sqlalchemy.sql.expression.func`
    expressions in conjunction
    with :meth:`~.Session.query`, i.e.::

        from sqlalchemy import func

        # count User records, without
        # using a subquery.
        session.query(func.count(User.id))

        # return count of user "id" grouped
        # by "name"
        session.query(func.count(User.id)).\\
            group_by(User.name)

        from sqlalchemy import distinct

        # count distinct "name" values
        session.query(func.count(distinct(User.name)))

    """
    # wrap the full statement in a subquery via from_self() so the
    # count remains accurate in the presence of joins, LIMIT/OFFSET,
    # DISTINCT etc. (see docstring above)
    col = sql.func.count(sql.literal_column('*'))
    return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
    """Perform a bulk delete query.

    Deletes rows matched by this query from the database.

    E.g.::

        sess.query(User).filter(User.age == 25).\\
            delete(synchronize_session=False)

        sess.query(User).filter(User.age == 25).\\
            delete(synchronize_session='evaluate')

    .. warning:: The :meth:`.Query.delete` method is a "bulk" operation,
       which bypasses ORM unit-of-work automation in favor of greater
       performance.  **Please read all caveats and warnings below.**

    :param synchronize_session: chooses the strategy for the removal of
     matched objects from the session. Valid values are:

     ``False`` - don't synchronize the session. This option is the most
     efficient and is reliable once the session is expired, which
     typically occurs after a commit(), or explicitly using
     expire_all(). Before the expiration, objects may still remain in
     the session which were in fact deleted which can lead to confusing
     results if they are accessed via get() or already loaded
     collections.

     ``'fetch'`` - performs a select query before the delete to find
     objects that are matched by the delete query and need to be
     removed from the session. Matched objects are removed from the
     session.

     ``'evaluate'`` - Evaluate the query's criteria in Python straight
     on the objects in the session. If evaluation of the criteria isn't
     implemented, an error is raised.

     The expression evaluator currently doesn't account for differing
     string collations between the database and Python.

    :return: the count of rows matched as returned by the database's
     "row count" feature.

    .. warning:: **Additional Caveats for bulk query deletes**

        * The method does **not** offer in-Python cascading of
          relationships - it is assumed that ON DELETE CASCADE/SET
          NULL/etc. is configured for any foreign key references
          which require it, otherwise the database may emit an
          integrity violation if foreign key references are being
          enforced.

          After the DELETE, dependent objects in the
          :class:`.Session` which were impacted by an ON DELETE
          may not contain the current state, or may have been
          deleted. This issue is resolved once the
          :class:`.Session` is expired, which normally occurs upon
          :meth:`.Session.commit` or can be forced by using
          :meth:`.Session.expire_all`.  Accessing an expired
          object whose row has been deleted will invoke a SELECT
          to locate the row; when the row is not found, an
          :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
          raised.

        * The ``'fetch'`` strategy results in an additional
          SELECT statement emitted and will significantly reduce
          performance.

        * The ``'evaluate'`` strategy performs a scan of
          all matching objects within the :class:`.Session`; if the
          contents of the :class:`.Session` are expired, such as
          via a proceeding :meth:`.Session.commit` call, **this will
          result in SELECT queries emitted for every matching object**.

        * The :meth:`.MapperEvents.before_delete` and
          :meth:`.MapperEvents.after_delete`
          events **are not invoked** from this method.  Instead, the
          :meth:`.SessionEvents.after_bulk_delete` method is provided to
          act upon a mass DELETE of entity rows.

    .. seealso::

        :meth:`.Query.update`

        :ref:`inserts_and_updates` - Core SQL tutorial

    """
    # TODO: cascades need handling.
    delete_op = persistence.BulkDelete.factory(
        self, synchronize_session)
    delete_op.exec_()
    return delete_op.rowcount
def update(self, values, synchronize_session='evaluate', update_args=None):
    """Perform a bulk update query.

    Updates rows matched by this query in the database.

    E.g.::

        sess.query(User).filter(User.age == 25).\\
            update({User.age: User.age - 10}, synchronize_session=False)

        sess.query(User).filter(User.age == 25).\\
            update({"age": User.age - 10}, synchronize_session='evaluate')

    .. warning:: The :meth:`.Query.update` method is a "bulk" operation,
       which bypasses ORM unit-of-work automation in favor of greater
       performance.  **Please read all caveats and warnings below.**

    :param values: a dictionary with attributes names, or alternatively
     mapped attributes or SQL expressions, as keys, and literal
     values or sql expressions as values.  If :ref:`parameter-ordered
     mode <updates_order_parameters>` is desired, the values can be
     passed as a list of 2-tuples; this requires that the
     :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
     flag is passed to the :paramref:`.Query.update.update_args`
     dictionary as well.

     .. versionchanged:: 1.0.0 - string names in the values dictionary
        are now resolved against the mapped entity; previously, these
        strings were passed as literal column names with no mapper-level
        translation.

    :param synchronize_session: chooses the strategy to update the
     attributes on objects in the session. Valid values are:

     ``False`` - don't synchronize the session. This option is the most
     efficient and is reliable once the session is expired, which
     typically occurs after a commit(), or explicitly using
     expire_all(). Before the expiration, updated objects may still
     remain in the session with stale values on their attributes, which
     can lead to confusing results.

     ``'fetch'`` - performs a select query before the update to find
     objects that are matched by the update query. The updated
     attributes are expired on matched objects.

     ``'evaluate'`` - Evaluate the Query's criteria in Python straight
     on the objects in the session. If evaluation of the criteria isn't
     implemented, an exception is raised.

     The expression evaluator currently doesn't account for differing
     string collations between the database and Python.

    :param update_args: Optional dictionary, if present will be passed
     to the underlying :func:`.update` construct as the ``**kw`` for
     the object.  May be used to pass dialect-specific arguments such
     as ``mysql_limit``, as well as other special arguments such as
     :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`.

     .. versionadded:: 1.0.0

    :return: the count of rows matched as returned by the database's
     "row count" feature.

    .. warning:: **Additional Caveats for bulk query updates**

        * The method does **not** offer in-Python cascading of
          relationships - it is assumed that ON UPDATE CASCADE is
          configured for any foreign key references which require
          it, otherwise the database may emit an integrity
          violation if foreign key references are being enforced.

          After the UPDATE, dependent objects in the
          :class:`.Session` which were impacted by an ON UPDATE
          CASCADE may not contain the current state; this issue is
          resolved once the :class:`.Session` is expired, which
          normally occurs upon :meth:`.Session.commit` or can be
          forced by using :meth:`.Session.expire_all`.

        * The ``'fetch'`` strategy results in an additional
          SELECT statement emitted and will significantly reduce
          performance.

        * The ``'evaluate'`` strategy performs a scan of
          all matching objects within the :class:`.Session`; if the
          contents of the :class:`.Session` are expired, such as
          via a proceeding :meth:`.Session.commit` call, **this will
          result in SELECT queries emitted for every matching object**.

        * The method supports multiple table updates, as detailed
          in :ref:`multi_table_updates`, and this behavior does
          extend to support updates of joined-inheritance and
          other multiple table mappings.  However, the **join
          condition of an inheritance mapper is not
          automatically rendered**.  Care must be taken in any
          multiple-table update to explicitly include the joining
          condition between those tables, even in mappings where
          this is normally automatic. E.g. if a class ``Engineer``
          subclasses ``Employee``, an UPDATE of the ``Engineer``
          local table using criteria against the ``Employee``
          local table might look like::

                session.query(Engineer).\\
                    filter(Engineer.id == Employee.id).\\
                    filter(Employee.name == 'dilbert').\\
                    update({"engineer_type": "programmer"})

        * The :meth:`.MapperEvents.before_update` and
          :meth:`.MapperEvents.after_update`
          events **are not invoked from this method**.  Instead, the
          :meth:`.SessionEvents.after_bulk_update` method is provided to
          act upon a mass UPDATE of entity rows.

    .. seealso::

        :meth:`.Query.delete`

        :ref:`inserts_and_updates` - Core SQL tutorial

    """
    update_args = update_args or {}
    update_op = persistence.BulkUpdate.factory(
        self, synchronize_session, values, update_args)
    update_op.exec_()
    return update_op.rowcount
def _compile_context(self, labels=True):
    """Compile this Query into a QueryContext carrying the final
    core SELECT statement.

    :param labels: when True, apply labels to the columns clause.
    :raises sa_exc.InvalidRequestError: if no columns could be derived.
    """
    # before_compile event handlers may replace the Query outright
    if self.dispatch.before_compile:
        for fn in self.dispatch.before_compile:
            new_query = fn(self)
            if new_query is not None:
                self = new_query

    context = QueryContext(self)

    if context.statement is not None:
        # from_statement() was used; no compilation needed
        return context

    context.labels = labels

    context._for_update_arg = self._for_update_arg

    for entity in self._entities:
        entity.setup_context(self, context)

    for rec in context.create_eager_joins:
        strategy = rec[0]
        strategy(*rec[1:])

    if context.from_clause:
        # "load from explicit FROMs" mode,
        # i.e. when select_from() or join() is used
        context.froms = list(context.from_clause)
    # else "load from discrete FROMs" mode,
    # i.e. when each _MappedEntity has its own FROM

    if self._enable_single_crit:
        self._adjust_for_single_inheritance(context)

    if not context.primary_columns:
        if self._only_load_props:
            raise sa_exc.InvalidRequestError(
                "No column-based properties specified for "
                "refresh operation. Use session.expire() "
                "to reload collections and related items.")
        else:
            raise sa_exc.InvalidRequestError(
                "Query contains no columns with which to "
                "SELECT from.")

    # eager loaders producing multiple rows combined with
    # LIMIT/OFFSET/DISTINCT require wrapping in a subquery
    if context.multi_row_eager_loaders and self._should_nest_selectable:
        context.statement = self._compound_eager_statement(context)
    else:
        context.statement = self._simple_statement(context)
    return context
def _compound_eager_statement(self, context):
    """Build the SELECT for the subquery-wrapped form of this Query."""
    # for eager joins present and LIMIT/OFFSET/DISTINCT,
    # wrap the query inside a select,
    # then append eager joins onto that

    if context.order_by:
        # the inner SELECT must also carry the ORDER BY columns
        order_by_col_expr = list(
            chain(*[
                sql_util.unwrap_order_by(o)
                for o in context.order_by
            ])
        )
    else:
        context.order_by = None
        order_by_col_expr = []

    inner = sql.select(
        context.primary_columns + order_by_col_expr,
        context.whereclause,
        from_obj=context.froms,
        use_labels=context.labels,
        # TODO: this order_by is only needed if
        # LIMIT/OFFSET is present in self._select_args,
        # else the application on the outside is enough
        order_by=context.order_by,
        **self._select_args
    )

    for hint in self._with_hints:
        inner = inner.with_hint(*hint)

    if self._correlate:
        inner = inner.correlate(*self._correlate)

    inner = inner.alias()

    equivs = self.__all_equivs()

    # the adapter translates outer references onto the inner alias
    context.adapter = sql_util.ColumnAdapter(inner, equivs)

    statement = sql.select(
        [inner] + context.secondary_columns,
        use_labels=context.labels)

    statement._for_update_arg = context._for_update_arg

    from_clause = inner
    for eager_join in context.eager_joins.values():
        # EagerLoader places a 'stop_on' attribute on the join,
        # giving us a marker as to where the "splice point" of
        # the join should be
        from_clause = sql_util.splice_joins(
            from_clause,
            eager_join, eager_join.stop_on)

    statement.append_from(from_clause)

    if context.order_by:
        statement.append_order_by(
            *context.adapter.copy_and_process(
                context.order_by
            )
        )

    statement.append_order_by(*context.eager_order_by)
    return statement
def _simple_statement(self, context):
    """Build the single-level (non-subquery-wrapped) SELECT for
    this Query."""
    if not context.order_by:
        context.order_by = None

    if self._distinct and context.order_by:
        # DISTINCT requires the ORDER BY columns to appear in the
        # columns clause (see the note in Query.distinct())
        order_by_col_expr = list(
            chain(*[
                sql_util.unwrap_order_by(o)
                for o in context.order_by
            ])
        )
        context.primary_columns += order_by_col_expr

    context.froms += tuple(context.eager_joins.values())

    statement = sql.select(
        context.primary_columns +
        context.secondary_columns,
        context.whereclause,
        from_obj=context.froms,
        use_labels=context.labels,
        order_by=context.order_by,
        **self._select_args
    )

    statement._for_update_arg = context._for_update_arg

    for hint in self._with_hints:
        statement = statement.with_hint(*hint)

    if self._correlate:
        statement = statement.correlate(*self._correlate)

    if context.eager_order_by:
        statement.append_order_by(*context.eager_order_by)
    return statement
def _adjust_for_single_inheritance(self, context):
    """Apply single-table-inheritance filtering.

    For all distinct single-table-inheritance mappers represented in
    the columns clause of this query, add criterion to the WHERE
    clause of the given QueryContext such that only the appropriate
    subtypes are selected from the total results.

    """
    for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
        # entities that are the target of a join are skipped
        if ext_info in self._join_entities:
            continue
        single_crit = ext_info.mapper._single_table_criterion
        if single_crit is not None:
            if adapter:
                single_crit = adapter.traverse(single_crit)
            single_crit = self._adapt_clause(single_crit, False, False)
            # AND the criterion onto any existing WHERE clause
            context.whereclause = sql.and_(
                sql.True_._ifnone(context.whereclause),
                single_crit)
def __str__(self):
    """Render this Query by compiling it into its SQL statement."""
    statement = self._compile_context().statement
    return str(statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
    """Adapt legacy ``with_lockmode()`` string arguments to
    :class:`.ForUpdateArg` parameters."""

    @classmethod
    def parse_legacy_query(cls, mode):
        """Return a :class:`.LockmodeArg` for the given legacy lockmode
        string, or ``None`` when no locking was requested.

        :param mode: one of ``None``, ``False``, ``"read"``,
         ``"update"`` or ``"update_nowait"``.
        :raises sa_exc.ArgumentError: if ``mode`` is not one of the
         recognized values.
        """
        if mode in (None, False):
            return None

        if mode == "read":
            read = True
            nowait = False
        elif mode == "update":
            read = nowait = False
        elif mode == "update_nowait":
            nowait = True
            read = False
        else:
            raise sa_exc.ArgumentError(
                "Unknown with_lockmode argument: %r" % mode)

        # use cls() rather than a hard-coded class name so that
        # subclasses produce instances of themselves
        return cls(read=read, nowait=nowait)
class _QueryEntity(object):
    """represent an entity column returned within a Query result."""

    def __new__(cls, *args, **kwargs):
        # Factory dispatch: when instantiated as _QueryEntity itself,
        # choose the concrete subclass based on the kind of entity
        # expression given (args[1]).
        if cls is _QueryEntity:
            entity = args[1]
            if not isinstance(entity, util.string_types) and \
                    _is_mapped_class(entity):
                cls = _MapperEntity
            elif isinstance(entity, Bundle):
                cls = _BundleEntity
            else:
                cls = _ColumnEntity
        return object.__new__(cls)

    def _clone(self):
        # shallow copy; all entity state lives in __dict__
        q = self.__class__.__new__(self.__class__)
        q.__dict__ = self.__dict__.copy()
        return q
class _MapperEntity(_QueryEntity):
    """mapper/class/AliasedClass entity"""

    def __init__(self, query, entity):
        # the first mapped entity appended becomes the query's
        # primary entity
        if not query._primary_entity:
            query._primary_entity = self
        query._entities.append(self)

        self.entities = [entity]
        self.expr = entity

    # a _MapperEntity produces a single entity per result row
    supports_single_entity = True

    def setup_entity(self, ext_info, aliased_adapter):
        # receive the inspected entity and (optional) alias adapter
        self.mapper = ext_info.mapper
        self.aliased_adapter = aliased_adapter
        self.selectable = ext_info.selectable
        self.is_aliased_class = ext_info.is_aliased_class
        self._with_polymorphic = ext_info.with_polymorphic_mappers
        self._polymorphic_discriminator = \
            ext_info.polymorphic_on
        self.entity_zero = ext_info
        # label: the alias name for aliased classes, else the
        # mapped class name
        if ext_info.is_aliased_class:
            self._label_name = self.entity_zero.name
        else:
            self._label_name = self.mapper.class_.__name__
        self.path = self.entity_zero._path_registry

    def set_with_polymorphic(self, query, cls_or_mappers,
                             selectable, polymorphic_on):
        """Receive an update from a call to query.with_polymorphic().

        Note the newer style of using a free standing with_polymporphic()
        construct doesn't make use of this method.

        """
        if self.is_aliased_class:
            # TODO: invalidrequest ?
            raise NotImplementedError(
                "Can't use with_polymorphic() against "
                "an Aliased object"
            )

        if cls_or_mappers is None:
            query._reset_polymorphic_adapter(self.mapper)
            return

        mappers, from_obj = self.mapper._with_polymorphic_args(
            cls_or_mappers, selectable)
        self._with_polymorphic = mappers
        self._polymorphic_discriminator = polymorphic_on

        self.selectable = from_obj
        query._mapper_loads_polymorphically_with(
            self.mapper, sql_util.ColumnAdapter(
                from_obj, self.mapper._equivalent_columns))

    # identity function used when filtering result rows
    filter_fn = id

    @property
    def type(self):
        # the mapped class this entity produces
        return self.mapper.class_

    @property
    def entity_zero_or_selectable(self):
        return self.entity_zero

    def corresponds_to(self, entity):
        # True when the given inspected entity refers to the same
        # target as this one, accounting for aliasing
        if entity.is_aliased_class:
            if self.is_aliased_class:
                if entity._base_alias is self.entity_zero._base_alias:
                    return True
            return False
        elif self.is_aliased_class:
            if self.entity_zero._use_mapper_path:
                return entity in self._with_polymorphic
            else:
                return entity is self.entity_zero

        return entity.common_parent(self.entity_zero)

    def adapt_to_selectable(self, query, sel):
        query._entities.append(self)

    def _get_entity_clauses(self, query, context):
        # return an adapter for this entity, combining any polymorphic
        # or alias adapter with the query's from-object alias

        adapter = None

        if not self.is_aliased_class:
            if query._polymorphic_adapters:
                adapter = query._polymorphic_adapters.get(self.mapper, None)
        else:
            adapter = self.aliased_adapter

        if adapter:
            if query._from_obj_alias:
                ret = adapter.wrap(query._from_obj_alias)
            else:
                ret = adapter
        else:
            ret = query._from_obj_alias

        return ret

    def row_processor(self, query, context, result):
        # return (instance-producing callable, label name) for this entity
        adapter = self._get_entity_clauses(query, context)

        if context.adapter and adapter:
            adapter = adapter.wrap(context.adapter)
        elif not adapter:
            adapter = context.adapter

        # polymorphic mappers which have concrete tables in
        # their hierarchy usually
        # require row aliasing unconditionally.
        if not adapter and self.mapper._requires_row_aliasing:
            adapter = sql_util.ColumnAdapter(
                self.selectable,
                self.mapper._equivalent_columns)

        # refresh/partial-load options only apply to the primary entity
        if query._primary_entity is self:
            only_load_props = query._only_load_props
            refresh_state = context.refresh_state
        else:
            only_load_props = refresh_state = None

        _instance = loading._instance_processor(
            self.mapper,
            context,
            result,
            self.path,
            adapter,
            only_load_props=only_load_props,
            refresh_state=refresh_state,
            polymorphic_discriminator=self._polymorphic_discriminator
        )

        return _instance, self._label_name

    def setup_context(self, query, context):
        # contribute this entity's FROM, ORDER BY and columns to the
        # compilation context
        adapter = self._get_entity_clauses(query, context)

        # if self._adapted_selectable is None:
        context.froms += (self.selectable,)

        if context.order_by is False and self.mapper.order_by:
            context.order_by = self.mapper.order_by

            # apply adaptation to the mapper's order_by if needed.
            if adapter:
                context.order_by = adapter.adapt_list(
                    util.to_list(
                        context.order_by
                    )
                )

        loading._setup_entity_query(
            context, self.mapper, self,
            self.path, adapter, context.primary_columns,
            with_polymorphic=self._with_polymorphic,
            only_load_props=query._only_load_props,
            polymorphic_discriminator=self._polymorphic_discriminator)

    def __str__(self):
        return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
    """A grouping of SQL expressions that are returned by a :class:`.Query`
    under one namespace.

    The :class:`.Bundle` essentially allows nesting of the tuple-based
    results returned by a column-oriented :class:`.Query` object.  It also
    is extensible via simple subclassing, where the primary capability
    to override is that of how the set of expressions should be returned,
    allowing post-processing as well as custom return types, without
    involving ORM identity-mapped classes.

    .. versionadded:: 0.9.0

    .. seealso::

        :ref:`bundles`

    """

    single_entity = False
    """If True, queries for a single Bundle will be returned as a single
    entity, rather than an element within a keyed tuple."""

    is_clause_element = False

    is_mapper = False

    is_aliased_class = False

    def __init__(self, name, *exprs, **kw):
        """Construct a new :class:`.Bundle`.

        e.g.::

            bn = Bundle("mybundle", MyClass.x, MyClass.y)

            for row in session.query(bn).filter(
                    bn.c.x == 5).filter(bn.c.y == 4):
                print(row.mybundle.x, row.mybundle.y)

        :param name: name of the bundle.
        :param \*exprs: columns or SQL expressions comprising the bundle.
        :param single_entity=False: if True, rows for this :class:`.Bundle`
         can be returned as a "single entity" outside of any enclosing tuple
         in the same manner as a mapped entity.

        """
        self.name = self._label = name
        self.exprs = exprs
        self.c = self.columns = ColumnCollection()
        # index each expression under its attribute key, falling back to
        # the expression's anonymous label for plain column expressions.
        self.columns.update((getattr(col, "key", col._label), col)
                            for col in exprs)
        self.single_entity = kw.pop('single_entity', self.single_entity)

    columns = None
    """A namespace of SQL expressions referred to by this :class:`.Bundle`.

        e.g.::

            bn = Bundle("mybundle", MyClass.x, MyClass.y)

            q = sess.query(bn).filter(bn.c.x == 5)

        Nesting of bundles is also supported::

            b1 = Bundle("b1",
                    Bundle('b2', MyClass.a, MyClass.b),
                    Bundle('b3', MyClass.x, MyClass.y)
                )

            q = sess.query(b1).filter(
                b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)

    .. seealso::

        :attr:`.Bundle.c`

    """

    c = None
    """An alias for :attr:`.Bundle.columns`."""

    def _clone(self):
        # shallow copy: the clone shares the expression/column collections
        # with the original, which is sufficient for label() below.
        cloned = self.__class__.__new__(self.__class__)
        cloned.__dict__.update(self.__dict__)
        return cloned

    def __clause_element__(self):
        # allow a Bundle to be used directly in SQL expression contexts.
        return expression.ClauseList(group=False, *self.c)

    @property
    def clauses(self):
        return self.__clause_element__().clauses

    def label(self, name):
        """Provide a copy of this :class:`.Bundle` passing a new label."""
        cloned = self._clone()
        cloned.name = name
        return cloned

    def create_row_processor(self, query, procs, labels):
        """Produce the "row processing" function for this :class:`.Bundle`.

        May be overridden by subclasses.

        .. seealso::

            :ref:`bundles` - includes an example of subclassing.

        """
        keyed_tuple = util.lightweight_named_tuple('result', labels)

        def proc(row):
            # iterate the sub-processors under a distinct name so the
            # comprehension variable no longer shadows this function's own
            # name ``proc`` (both were previously named ``proc``).
            return keyed_tuple([p(row) for p in procs])
        return proc
class _BundleEntity(_QueryEntity):
    """Query entity that wraps a :class:`.Bundle`, delegating to the
    per-expression sub-entities it creates for each bundled expression.
    """

    def __init__(self, query, bundle, setup_entities=True):
        query._entities.append(self)
        self.bundle = self.expr = bundle
        self.type = type(bundle)
        self._label_name = bundle.name
        self._entities = []

        if setup_entities:
            # recursively create sub-entities; note ``self`` is passed as
            # the "query" argument so the children append themselves to
            # this bundle's _entities list instead of the Query's.
            for expr in bundle.exprs:
                if isinstance(expr, Bundle):
                    _BundleEntity(self, expr)
                else:
                    _ColumnEntity(self, expr, namespace=self)

        self.entities = ()

        # bundle rows are always passed through unfiltered.
        self.filter_fn = lambda item: item

        self.supports_single_entity = self.bundle.single_entity

    @property
    def entity_zero(self):
        # first sub-entity that resolves to an actual mapped entity.
        for ent in self._entities:
            ezero = ent.entity_zero
            if ezero is not None:
                return ezero
        else:
            return None

    def corresponds_to(self, entity):
        # TODO: this seems to have no effect for
        # _ColumnEntity either
        return False

    @property
    def entity_zero_or_selectable(self):
        for ent in self._entities:
            ezero = ent.entity_zero_or_selectable
            if ezero is not None:
                return ezero
        else:
            return None

    def adapt_to_selectable(self, query, sel):
        c = _BundleEntity(query, self.bundle, setup_entities=False)
        # c._label_name = self._label_name
        # c.entity_zero = self.entity_zero
        # c.entities = self.entities

        # adapt each sub-entity into the new bundle entity.
        for ent in self._entities:
            ent.adapt_to_selectable(c, sel)

    def setup_entity(self, ext_info, aliased_adapter):
        for ent in self._entities:
            ent.setup_entity(ext_info, aliased_adapter)

    def setup_context(self, query, context):
        for ent in self._entities:
            ent.setup_context(query, context)

    def row_processor(self, query, context, result):
        # gather each child's (processor, label) pair, then let the Bundle
        # (possibly a user subclass) combine them into one row value.
        procs, labels = zip(
            *[ent.row_processor(query, context, result)
              for ent in self._entities]
        )

        proc = self.bundle.create_row_processor(query, procs, labels)

        return proc, self._label_name
class _ColumnEntity(_QueryEntity):
    """Column/expression based entity."""

    def __init__(self, query, column, namespace=None):
        self.expr = column
        self.namespace = namespace
        # search_entities: whether to scan the expression for annotated
        # ORM entities; check_column: whether the ColumnElement check
        # below has already been satisfied by an earlier conversion.
        search_entities = True
        check_column = False

        if isinstance(column, util.string_types):
            # plain string: treat as a literal SQL column expression.
            column = sql.literal_column(column)
            self._label_name = column.name
            search_entities = False
            check_column = True
            _entity = None
        elif isinstance(column, (
                attributes.QueryableAttribute,
                interfaces.PropComparator
        )):
            # mapped attribute: the parent entity is known directly from
            # the attribute, no need to search annotations.
            _entity = getattr(column, '_parententity', None)
            if _entity is not None:
                search_entities = False
            self._label_name = column.key
            column = column._query_clause_element()
            check_column = True
            if isinstance(column, Bundle):
                # attribute resolved to a Bundle; delegate entirely and
                # do not register this (half-constructed) entity.
                _BundleEntity(query, column)
                return

        if not isinstance(column, sql.ColumnElement):
            if hasattr(column, '_select_iterable'):
                # break out an object like Table into
                # individual columns
                for c in column._select_iterable:
                    if c is column:
                        break
                    _ColumnEntity(query, c, namespace=column)
                else:
                    # loop completed without break: the object expanded
                    # into per-column entities; nothing more to do here.
                    return

            raise sa_exc.InvalidRequestError(
                "SQL expression, column, or mapped entity "
                "expected - got '%r'" % (column, )
            )
        elif not check_column:
            self._label_name = getattr(column, 'key', None)
            search_entities = True

        self.type = type_ = column.type
        if type_.hashable:
            self.filter_fn = lambda item: item
        else:
            # unhashable values cannot be deduplicated; substitute a
            # unique surrogate key per row instead.
            counter = util.counter()
            self.filter_fn = lambda item: counter()

        # If the Column is unnamed, give it a
        # label() so that mutable column expressions
        # can be located in the result even
        # if the expression's identity has been changed
        # due to adaption.
        if not column._label and not getattr(column, 'is_literal', False):
            column = column.label(self._label_name)

        query._entities.append(self)

        self.column = column
        self.froms = set()

        # look for ORM entities represented within the
        # given expression.  Try to count only entities
        # for columns whose FROM object is in the actual list
        # of FROMs for the overall expression - this helps
        # subqueries which were built from ORM constructs from
        # leaking out their entities into the main select construct
        self.actual_froms = actual_froms = set(column._from_objects)

        if not search_entities:
            self.entity_zero = _entity
            if _entity:
                self.entities = [_entity]
            else:
                self.entities = []
            self._from_entities = set(self.entities)
        else:
            # scan the expression tree for elements annotated with a
            # parent entity, preserving first-seen order.
            all_elements = [
                elem for elem in visitors.iterate(column, {})
                if 'parententity' in elem._annotations
            ]

            self.entities = util.unique_list([
                elem._annotations['parententity']
                for elem in all_elements
                if 'parententity' in elem._annotations
            ])

            self._from_entities = set([
                elem._annotations['parententity']
                for elem in all_elements
                if 'parententity' in elem._annotations
                and actual_froms.intersection(elem._from_objects)
            ])

            if self.entities:
                self.entity_zero = self.entities[0]
            elif self.namespace is not None:
                self.entity_zero = self.namespace
            else:
                self.entity_zero = None

    supports_single_entity = False

    @property
    def entity_zero_or_selectable(self):
        # prefer a mapped entity; otherwise fall back to any FROM object
        # the expression references.
        if self.entity_zero is not None:
            return self.entity_zero
        elif self.actual_froms:
            return list(self.actual_froms)[0]
        else:
            return None

    def adapt_to_selectable(self, query, sel):
        c = _ColumnEntity(query, sel.corresponding_column(self.column))
        c._label_name = self._label_name
        c.entity_zero = self.entity_zero
        c.entities = self.entities

    def setup_entity(self, ext_info, aliased_adapter):
        if 'selectable' not in self.__dict__:
            self.selectable = ext_info.selectable

        if self.actual_froms.intersection(ext_info.selectable._from_objects):
            self.froms.add(ext_info.selectable)

    def corresponds_to(self, entity):
        # TODO: just returning False here,
        # no tests fail
        if self.entity_zero is None:
            return False
        elif _is_aliased_class(entity):
            # TODO: polymorphic subclasses ?
            return entity is self.entity_zero
        else:
            return not _is_aliased_class(self.entity_zero) and \
                entity.common_parent(self.entity_zero)

    def row_processor(self, query, context, result):
        # reuse the adapted column cached by setup_context() when present.
        if ('fetch_column', self) in context.attributes:
            column = context.attributes[('fetch_column', self)]
        else:
            column = query._adapt_clause(self.column, False, True)

        if context.adapter:
            column = context.adapter.columns[column]

        getter = result._getter(column)
        return getter, self._label_name

    def setup_context(self, query, context):
        column = query._adapt_clause(self.column, False, True)
        context.froms += tuple(self.froms)
        context.primary_columns.append(column)

        # cache the adapted column for row_processor() above.
        context.attributes[('fetch_column', self)] = column

    def __str__(self):
        return str(self.column)
class QueryContext(object):
    """Mutable per-execution state for one invocation of a :class:`.Query`.

    Accumulates FROM clauses, selected columns, eager-join structures and
    option flags while the query is compiled and executed.
    """

    __slots__ = (
        'multi_row_eager_loaders', 'adapter', 'froms', 'for_update',
        'query', 'session', 'autoflush', 'populate_existing',
        'invoke_all_eagers', 'version_check', 'refresh_state',
        'primary_columns', 'secondary_columns', 'eager_order_by',
        'eager_joins', 'create_eager_joins', 'propagate_options',
        'attributes', 'statement', 'from_clause', 'whereclause',
        'order_by', 'labels', '_for_update_arg', 'runid', 'partials'
    )

    def __init__(self, query):

        if query._statement is not None:
            # explicit statement (from_statement()): apply labels to a
            # plain, non-textual SELECT so ORM columns can be matched.
            if isinstance(query._statement, expression.SelectBase) and \
                    not query._statement._textual and \
                    not query._statement.use_labels:
                self.statement = query._statement.apply_labels()
            else:
                self.statement = query._statement
        else:
            # statement will be generated; seed criteria from the Query.
            self.statement = None
            self.from_clause = query._from_obj
            self.whereclause = query._criterion
            self.order_by = query._order_by

        self.multi_row_eager_loaders = False
        self.adapter = None
        self.froms = ()
        self.for_update = None
        self.query = query
        self.session = query.session
        self.autoflush = query._autoflush
        self.populate_existing = query._populate_existing
        self.invoke_all_eagers = query._invoke_all_eagers
        self.version_check = query._version_check
        self.refresh_state = query._refresh_state
        self.primary_columns = []
        self.secondary_columns = []
        self.eager_order_by = []
        self.eager_joins = {}
        self.create_eager_joins = []
        # keep only options that propagate to relationship loaders.
        self.propagate_options = set(o for o in query._with_options if
                                     o.propagate_to_loaders)
        self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
    """Mapper option recording that the query's main table is aliased;
    see :func:`.contains_alias`.
    """

    def __init__(self, alias):
        """Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
        that the main table has been aliased.

        This is a seldom-used option to suit the
        very rare case that :func:`.contains_eager`
        is being used in conjunction with a user-defined SELECT
        statement that aliases the parent table.  E.g.::

            # define an aliased UNION called 'ulist'
            ulist = users.select(users.c.user_id==7).\\
                union(users.select(users.c.user_id>7)).\\
                alias('ulist')

            # add on an eager load of "addresses"
            statement = ulist.outerjoin(addresses).\\
                select().apply_labels()

            # create query, indicating "ulist" will be an
            # alias for the main table, "addresses"
            # property should be eager loaded
            query = session.query(User).options(
                contains_alias(ulist),
                contains_eager(User.addresses))

            # then get results via the statement
            results = query.from_statement(statement).all()

        :param alias: is the string name of an alias, or a
         :class:`~.sql.expression.Alias` object representing
         the alias.

        """
        self.alias = alias

    def process_query(self, query):
        # a string alias name is resolved against the primary mapper's
        # mapped table; an Alias construct is used directly.
        if isinstance(self.alias, util.string_types):
            alias = query._mapper_zero().mapped_table.alias(self.alias)
        else:
            alias = self.alias
        query._from_obj_alias = sql_util.ColumnAdapter(alias)
| cc0-1.0 |
friedrich420/S5-AEL-Kernel | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
    # Called once by perf after all events have been processed; dump the
    # tally of events no handler claimed.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
	# Handler for the irq:softirq_entry tracepoint; prints the common
	# header plus the softirq vector decoded to its symbolic name.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)

                print_uncommon(context)

		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
	# Handler for the kmem:kmalloc tracepoint; prints the common header
	# plus the allocation details, with gfp_flags decoded to flag names.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)

                print_uncommon(context)

		print "call_site=%u, ptr=%u, bytes_req=%u, " \
		"bytes_alloc=%u, gfp_flags=%s\n" % \
		(call_site, ptr, bytes_req, bytes_alloc,
		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  `unhandled` is an
    # autodict; the first increment on a fresh key raises TypeError
    # (empty-dict + 1), which seeds the counter at 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Print the fields common to every event in one aligned row; the
	# trailing comma suppresses the newline so handlers can append.
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These per-event fields are fetched back from perf via the context
    # accessors rather than being passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
               common_lock_depth(context))
def print_unhandled():
    # Print a two-column summary (event name, count) of events that no
    # handler claimed; silent if everything was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "-----------"),

    for event_name in keys:
	print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
annarev/tensorflow | tensorflow/python/keras/integration_test/multi_worker_tutorial_test.py | 2 | 13552 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for multi-worker training tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import re
import unittest
import uuid
import zipfile
from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
PER_WORKER_BATCH_SIZE = 64
NUM_WORKERS = 2
NUM_EPOCHS = 2
NUM_STEPS_PER_EPOCH = 50
def _is_chief(task_type, task_id):
# Note: there are two possible `TF_CONFIG` configuration.
# 1) In addition to `worker` tasks, a `chief` task type is use;
# in this case, this function should be modified to
# `return task_type == 'chief'`.
# 2) Only `worker` task type is used; in this case, worker 0 is
# regarded as the chief. The implementation demonstrated here
# is for this case.
return task_type == 'worker' and task_id == 0
def _get_temp_dir(dirpath, task_id):
  """Create (if needed) and return a per-task scratch directory
  `workertemp_<task_id>` under `dirpath`, for non-chief workers."""
  base_dirpath = 'workertemp_' + str(task_id)
  temp_dir = os.path.join(dirpath, base_dirpath)
  tf.io.gfile.makedirs(temp_dir)
  return temp_dir
def write_filepath(filepath, task_type, task_id):
  """Return a write path for `filepath` that is safe for this task.

  The chief writes to the requested location; every other worker writes
  to a per-task temporary subdirectory so concurrent saves don't clobber
  each other.
  """
  directory, base = os.path.split(filepath)
  if not _is_chief(task_type, task_id):
    directory = _get_temp_dir(directory, task_id)
  return os.path.join(directory, base)
class MultiWorkerTutorialTest(parameterized.TestCase, tf.test.TestCase):
  """Test of multi-worker training flow in tutorials on tensorflow.org.

  Please see below test method docs for what actual tutorial is being covered.
  """

  # TODO(rchao): Add a test to demonstrate gather with MWMS.

  @contextlib.contextmanager
  def skip_fetch_failure_exception(self):
    """Context manager converting flaky dataset-download errors to skips."""
    try:
      yield
    except zipfile.BadZipfile as e:
      # There can be a race when multiple processes are downloading the data.
      # Skip the test if that results in loading errors.
      self.skipTest('Data loading error: Bad magic number for file header.')
    except Exception as e:  # pylint: disable=broad-except
      if 'URL fetch failure' in str(e):
        self.skipTest('URL fetch error not considered failure of the test.')
      else:
        raise

  def mnist_dataset(self):
    """Download MNIST and return a shuffled training tf.data.Dataset."""
    # unique filename per call avoids clashes between concurrent test runs.
    path_to_use = 'mnist_{}.npz'.format(str(uuid.uuid4()))
    with self.skip_fetch_failure_exception():
      (x_train,
       y_train), _ = tf.keras.datasets.mnist.load_data(path=path_to_use)
    # The `x` arrays are in uint8 and have values in the range [0, 255].
    # We need to convert them to float32 with values in the range [0, 1]
    x_train = x_train / np.float32(255)
    y_train = y_train.astype(np.int64)
    train_dataset = tf.data.Dataset.from_tensor_slices(
        (x_train, y_train)).shuffle(60000)
    return train_dataset

  def dataset_fn(self, global_batch_size, input_context):
    """Per-worker dataset function: shard by pipeline, batch per replica."""
    batch_size = input_context.get_per_replica_batch_size(global_batch_size)
    dataset = self.mnist_dataset()
    dataset = dataset.shard(input_context.num_input_pipelines,
                            input_context.input_pipeline_id)
    dataset = dataset.batch(batch_size)
    return dataset

  def build_cnn_model(self):
    """Return the (uncompiled) small CNN used throughout these tests."""
    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=(28, 28)),
        tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10)
    ])

  def build_and_compile_cnn_model(self):
    """Return the CNN compiled with SGD + sparse-categorical crossentropy."""
    model = self.build_cnn_model()
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
        metrics=['accuracy'])
    return model

  @tf.__internal__.test.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'], tf_api_version=2))
  def testSingleWorkerModelFit(self):
    # baseline: single-process training with plain model.fit().
    single_worker_dataset = self.mnist_dataset().batch(
        PER_WORKER_BATCH_SIZE)
    single_worker_model = self.build_and_compile_cnn_model()
    single_worker_model.fit(single_worker_dataset, epochs=NUM_EPOCHS)

  @tf.__internal__.test.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'], tf_api_version=2))
  def testMwmsWithModelFit(self, mode):
    """Test multi-worker training flow demo'ed in go/multi-worker-with-keras.

    This test should be kept in sync with the code samples in
    go/multi-worker-with-keras.

    Args:
      mode: Runtime mode.
    """

    def fn(model_path, checkpoint_dir):
      # this function runs in each spawned worker process.
      global_batch_size = PER_WORKER_BATCH_SIZE * NUM_WORKERS
      strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
      with strategy.scope():
        multi_worker_model = self.build_and_compile_cnn_model()

      callbacks = [
          tf.keras.callbacks.ModelCheckpoint(
              filepath=os.path.join(self.get_temp_dir(), 'checkpoint'))
      ]

      multi_worker_dataset = strategy.distribute_datasets_from_function(
          lambda input_context: self.dataset_fn(global_batch_size, input_context
                                               ))

      multi_worker_model.fit(
          multi_worker_dataset,
          epochs=NUM_EPOCHS,
          steps_per_epoch=50,
          callbacks=callbacks)

      task_type, task_id = (strategy.cluster_resolver.task_type,
                            strategy.cluster_resolver.task_id)
      write_model_path = write_filepath(model_path, task_type, task_id)

      # every worker saves (needed for collective save semantics), but
      # non-chief copies are written to temp dirs and deleted afterwards.
      multi_worker_model.save(write_model_path)
      if not _is_chief(task_type, task_id):
        tf.io.gfile.rmtree(os.path.dirname(write_model_path))

      # Make sure chief finishes saving before non-chief's assertions.
      tf.__internal__.distribute.multi_process_runner.get_barrier().wait()

      if not tf.io.gfile.exists(model_path):
        raise RuntimeError()
      if tf.io.gfile.exists(write_model_path) != _is_chief(task_type, task_id):
        raise RuntimeError()

      # reload the chief's saved model and continue training briefly.
      with strategy.scope():
        loaded_model = tf.keras.models.load_model(model_path)
        loaded_model.fit(multi_worker_dataset, epochs=1, steps_per_epoch=1)

      # same chief/non-chief dance for checkpoints.
      checkpoint = tf.train.Checkpoint(model=multi_worker_model)
      write_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id)
      checkpoint_manager = tf.train.CheckpointManager(
          checkpoint, directory=write_checkpoint_dir, max_to_keep=1)

      checkpoint_manager.save()
      if not _is_chief(task_type, task_id):
        tf.io.gfile.rmtree(write_checkpoint_dir)

      # Make sure chief finishes saving before non-chief's assertions.
      tf.__internal__.distribute.multi_process_runner.get_barrier().wait()

      if not tf.io.gfile.exists(checkpoint_dir):
        raise RuntimeError()
      if tf.io.gfile.exists(write_checkpoint_dir) != _is_chief(
          task_type, task_id):
        raise RuntimeError()

      latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
      checkpoint.restore(latest_checkpoint)
      multi_worker_model.fit(multi_worker_dataset, epochs=1, steps_per_epoch=1)

      logging.info('testMwmsWithModelFit successfully ends')

    model_path = os.path.join(self.get_temp_dir(), 'model.tf')
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt')
    try:
      mpr_result = tf.__internal__.distribute.multi_process_runner.run(
          fn,
          tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
              num_workers=NUM_WORKERS),
          args=(model_path, checkpoint_dir),
          return_output=True)
    except tf.errors.UnavailableError:
      self.skipTest('Skipping rare disconnection among the workers.')

    self.assertTrue(
        any([
            'testMwmsWithModelFit successfully ends' in msg
            for msg in mpr_result.stdout
        ]))

    def extract_accuracy(worker_id, input_string):
      # pull "accuracy: <float>" out of a "[worker-N] ..." log line.
      match = re.match(
          r'\[worker\-{}\].*accuracy: (\d+\.\d+).*'.format(worker_id),
          input_string)
      return None if match is None else float(match.group(1))

    for worker_id in range(NUM_WORKERS):
      accu_result = tf.nest.map_structure(
          lambda x: extract_accuracy(worker_id, x),  # pylint: disable=cell-var-from-loop
          mpr_result.stdout)
      self.assertTrue(
          any(accu_result), 'Every worker is supposed to have accuracy result.')

  @tf.__internal__.test.combinations.generate(
      tf.__internal__.test.combinations.combine(
          mode=['eager'], tf_api_version=2))
  def testMwmsWithCtl(self, mode):
    """Test multi-worker CTL training flow demo'ed in a to-be-added tutorial."""

    def proc_func(checkpoint_dir):
      # custom-training-loop (CTL) variant, run in each worker process.
      global_batch_size = PER_WORKER_BATCH_SIZE * NUM_WORKERS
      strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
      try:
        with strategy.scope():
          multi_worker_model = self.build_cnn_model()

        multi_worker_dataset = strategy.distribute_datasets_from_function(
            lambda input_context: self.dataset_fn(global_batch_size,  # pylint: disable=g-long-lambda
                                                  input_context))
        optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001)
        train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name='train_accuracy')

        @tf.function
        def train_step(iterator):
          """Training step function."""

          def step_fn(inputs):
            """Per-Replica step function."""
            x, y = inputs
            with tf.GradientTape() as tape:
              predictions = multi_worker_model(x, training=True)
              per_batch_loss = tf.keras.losses.SparseCategoricalCrossentropy(
                  from_logits=True,
                  reduction=tf.keras.losses.Reduction.NONE)(y, predictions)
              loss = tf.nn.compute_average_loss(
                  per_batch_loss, global_batch_size=global_batch_size)

            grads = tape.gradient(loss, multi_worker_model.trainable_variables)
            optimizer.apply_gradients(
                zip(grads, multi_worker_model.trainable_variables))
            train_accuracy.update_state(y, predictions)

            return loss

          per_replica_losses = strategy.run(step_fn, args=(next(iterator),))
          return strategy.reduce(
              tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)

        # epoch/step counters live in the checkpoint so training resumes
        # exactly where it left off after a restart.
        epoch = tf.Variable(
            initial_value=tf.constant(0, dtype=tf.dtypes.int64), name='epoch')
        step_in_epoch = tf.Variable(
            initial_value=tf.constant(0, dtype=tf.dtypes.int64),
            name='step_in_epoch')

        task_type, task_id = (strategy.cluster_resolver.task_type,
                              strategy.cluster_resolver.task_id)
        checkpoint = tf.train.Checkpoint(
            model=multi_worker_model, epoch=epoch, step_in_epoch=step_in_epoch)

        write_checkpoint_dir = write_filepath(checkpoint_dir, task_type,
                                              task_id)
        checkpoint_manager = tf.train.CheckpointManager(
            checkpoint, directory=write_checkpoint_dir, max_to_keep=1)

        latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
        if latest_checkpoint:
          checkpoint.restore(latest_checkpoint)

        while epoch.numpy() < NUM_EPOCHS:
          iterator = iter(multi_worker_dataset)
          total_loss = 0.0
          num_batches = 0

          while step_in_epoch.numpy() < NUM_STEPS_PER_EPOCH:
            total_loss += train_step(iterator)
            num_batches += 1
            step_in_epoch.assign_add(1)

          train_loss = total_loss / num_batches
          logging.info('Epoch: %d, accuracy: %f, train_loss: %f.',
                       epoch.numpy(), train_accuracy.result(), train_loss)

          train_accuracy.reset_states()

          checkpoint_manager.save()
          if not _is_chief(task_type, task_id):
            tf.io.gfile.rmtree(write_checkpoint_dir)

          epoch.assign_add(1)
          step_in_epoch.assign(0)
      except tf.errors.UnavailableError as e:
        logging.info('UnavailableError occurred: %r', e)
        raise unittest.SkipTest('Skipping test due to UnavailableError')

      logging.info('testMwmsWithCtl successfully ends')

    checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt')

    mpr_result = tf.__internal__.distribute.multi_process_runner.run(
        proc_func,
        tf.__internal__.distribute.multi_process_runner.create_cluster_spec(
            num_workers=NUM_WORKERS),
        return_output=True,
        args=(checkpoint_dir,))

    self.assertTrue(
        any([
            'testMwmsWithCtl successfully ends' in msg
            for msg in mpr_result.stdout
        ]))
if __name__ == '__main__':
  # run under the multi-process test main so worker subprocesses work.
  tf.__internal__.distribute.multi_process_runner.test_main()
| apache-2.0 |
stvstnfrd/edx-platform | lms/djangoapps/courseware/tests/test_context_processor.py | 1 | 1674 | """
Unit tests for courseware context_processor
"""
from django.contrib.auth.models import AnonymousUser
from mock import Mock
from lms.djangoapps.courseware.context_processor import user_timezone_locale_prefs
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class UserPrefContextProcessorUnitTest(ModuleStoreTestCase):
    """
    Unit test for courseware context_processor
    """

    def setUp(self):
        # Python 3 zero-argument super(); the explicit two-argument form
        # (and its lint-amnesty pragma) is no longer needed.
        super().setUp()
        self.user = UserFactory.create()
        self.request = Mock()
        self.request.user = self.user

    def test_anonymous_user(self):
        """Anonymous users have neither timezone nor language preference."""
        self.request.user = AnonymousUser()
        context = user_timezone_locale_prefs(self.request)
        assert context['user_timezone'] is None
        assert context['user_language'] is None

    def test_no_timezone_preference(self):
        """A language-only preference yields language but no timezone."""
        set_user_preference(self.user, 'pref-lang', 'en')
        context = user_timezone_locale_prefs(self.request)
        assert context['user_timezone'] is None
        assert context['user_language'] is not None
        assert context['user_language'] == 'en'

    def test_no_language_preference(self):
        """A timezone-only preference yields timezone but no language."""
        set_user_preference(self.user, 'time_zone', 'Asia/Tokyo')
        context = user_timezone_locale_prefs(self.request)
        assert context['user_language'] is None
        assert context['user_timezone'] is not None
        assert context['user_timezone'] == 'Asia/Tokyo'
tagatac/scrapy | scrapy/loader/processors.py | 145 | 2850 | """
This module provides some commonly used processors for Item Loaders.
See documentation in docs/topics/loaders.rst
"""
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.datatypes import MergeDict
from .common import wrap_loader_context
class MapCompose(object):
    """Apply each function, in order, to every element of the input.

    The input value is first flattened into an iterable; each function may
    return a scalar or an iterable, and results are re-flattened between
    steps, so the list of values can grow or shrink along the way.
    """

    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped = [wrap_loader_context(f, context) for f in self.functions]
        values = arg_to_iter(value)
        for func in wrapped:
            collected = []
            for item in values:
                collected += arg_to_iter(func(item))
            values = collected
        return values
class Compose(object):
    """Chain functions left-to-right, feeding each result to the next.

    By default the chain stops early once the value becomes ``None``
    (``stop_on_none=True`` in the default loader context).
    """

    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.stop_on_none = default_loader_context.get('stop_on_none', True)
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped:
            if self.stop_on_none and value is None:
                break
            value = func(value)
        return value
class TakeFirst(object):
    """Return the first value that is neither ``None`` nor the empty
    string; returns ``None`` implicitly when no such value exists.
    """

    def __call__(self, values):
        for candidate in values:
            if candidate is None or candidate == '':
                continue
            return candidate
class Identity(object):
    """Pass-through processor: returns its input unchanged."""

    def __call__(self, values):
        return values
class SelectJmes(object):
    """
    Query the input string for the jmespath (given at instantiation),
    and return the answer
    Requires : jmespath(https://github.com/jmespath/jmespath)
    Note: SelectJmes accepts only one input element at a time.
    """

    def __init__(self, json_path):
        self.json_path = json_path
        # imported lazily so jmespath is only required when this
        # processor is actually used.
        import jmespath
        self.compiled_path = jmespath.compile(self.json_path)

    def __call__(self, value):
        """Query value for the jmespath query and return answer
        :param value: a data structure (dict, list) to extract from
        :return: Element extracted according to jmespath query
        """
        return self.compiled_path.search(value)
class Join(object):
    """Concatenate the input strings using a separator (a space by
    default)."""

    def __init__(self, separator=u' '):
        self.separator = separator

    def __call__(self, values):
        sep = self.separator
        return sep.join(values)
LibiSC/tab10test | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}     # long-lived tid -> execname mapping (was initialized twice; deduplicated)
thread_thislock = {}   # tid -> futex uaddr the thread is currently blocked on
thread_blocktime = {}  # tid -> timestamp (ns) at which the thread began blocking
lock_waits = {}        # long-lived stats on (tid,lock) blockage elapsed time
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
			      nr, uaddr, op, val, utime, uaddr2, val3):
	# Record when a thread starts blocking on a futex; only WAIT
	# operations matter here.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events

	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
			     nr, ret):
	# On futex return, accumulate how long the thread was blocked on
	# its (tid, lock) pair and clear the in-flight state.
	if thread_blocktime.has_key(tid):
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Called by perf before processing; this script runs until interrupted.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Summarize contention per (tid, lock): how often each thread
	# blocked on each futex and the average wait in nanoseconds.
	for (tid, lock) in lock_waits:
		min, max, avg, count = lock_waits[tid, lock]
		print "%s[%d] lock %x contended %d times, %d avg ns" % \
		      (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
hzj123/56th | pombola/core/tests/test_positions.py | 3 | 18319 | import random
import datetime
from django.contrib.contenttypes.models import ContentType
from django.core import exceptions
from django.test import TestCase
from django_date_extensions.fields import ApproximateDate
from pombola.core import models
class PositionTest(TestCase):
def setUp(self):
    # Build the minimal fixture graph used by the position tests:
    # a person, an organisation (with its required kind) and a title.
    self.person = models.Person(
        legal_name = 'Test Person',
        slug       = 'test-person',
    )
    self.person.save()

    self.organisation_kind = models.OrganisationKind(
        name = 'Foo',
        slug = 'foo',
    )
    self.organisation_kind.save()

    self.organisation = models.Organisation(
        name = 'Test Org',
        slug = 'test-org',
        kind = self.organisation_kind,
    )
    self.organisation.save()

    self.title = models.PositionTitle.objects.create(
        name = 'Test title',
        slug = 'test-title',
    )
def tearDown(self):
    # Delete fixtures in dependency order (organisation before its kind).
    self.person.delete()
    self.organisation.delete()
    self.organisation_kind.delete()
    self.title.delete()
def test_unicode(self):
"""Check that missing attributes don't crash"""
position = models.Position(
person = self.person,
)
self.assertEqual( str(position), 'Test Person (??? at ???)' )
def test_display_dates(self):
"""Check that the date that is displayed is correct"""
position = models.Position(person = self.person)
# Dates that will be used for testing
past = ApproximateDate( past=True )
y2000 = ApproximateDate( year=2000 )
y2100 = ApproximateDate( year=2100 )
future = ApproximateDate( future=True )
# test grid: start, end, uot
tests = (
( None, None, "" ),
( None, past, "Ended" ),
( None, y2000, "Ended 2000" ),
( None, y2100, "Will end 2100" ),
( None, future, "Ongoing" ),
( past, None, "Started" ),
( past, past, "Ended" ),
( past, y2000, "Ended 2000" ),
( past, y2100, "Will end 2100" ),
( past, future, "Ongoing" ),
( y2000, None, "Started 2000" ),
( y2000, past, "Started 2000, now ended" ),
( y2000, y2000, "2000 → 2000" ),
( y2000, y2100, "2000 → 2100" ),
( y2000, future, "Started 2000" ),
( y2100, None, "Will start 2100" ),
( y2100, y2100, "2100 → 2100" ),
( y2100, future, "Will start 2100" ),
( future, None, "Not started yet" ),
( future, future, "Not started yet" ),
# These are impossible, but we don't validate against them. Best check something
# sensible is returned. Might need if we ever do a site for Time Lords!
( y2100, past, "Will start 2100, now ended" ),
( y2100, y2000, "2100 → 2000" ),
( future, past, "Ended" ),
( future, y2000, "Ended 2000" ),
( future, y2100, "Will end 2100" ),
)
for start_date, end_date, expected in tests:
position.start_date = start_date
position.end_date = end_date
actual = position.display_dates()
self.assertEqual(
actual,
expected,
"%s -> %s should be '%s', not '%s'" % (start_date, end_date, expected, actual)
)
def test_past_end_dates(self):
"""
Check that the entries can be created with past dates. Issues could
occur as past dates are before all others, so a past end_date would come
before a start_date. Should have a special case for this.
"""
# Dates that will be used for testing
past = ApproximateDate( past=True )
y2000 = ApproximateDate( year=2000 )
y2100 = ApproximateDate( year=2100 )
future = ApproximateDate( future=True )
tests = (
# [start, end, exception]
[None, past, None],
[past, past, None],
[y2000, past, None],
[y2100, past, None],
[future, past, None],
# Turns out that there is no validation for start > end. Perhaps there should be..
# [y2100, past, exceptions.ValidationError],
# [future, past, exceptions.ValidationError],
)
def create_position(**kwargs):
pos = models.Position(**kwargs)
pos._set_sorting_dates()
pos.full_clean() # needed as otherwise no validation occurs. Genius!
for start_date, end_date, exception in tests:
kwargs = dict(person=self.person, title=self.title, start_date=start_date, end_date=end_date)
if exception:
self.assertRaises(exception, create_position, **kwargs)
else:
# Should just work without throwing exception
create_position(**kwargs)
def test_sorting(self):
"""Check that the sorting is as expected"""
position_dates = [
# start_date, end_date,
( 'future', 'future', ),
( 'future', None, ),
( '2002', 'future', ),
( '2001', 'future', ),
( 'past', 'future' ),
( None, 'future', ),
( 'future', '2010', ),
( '2010', None, ),
( '2002', '2010', ),
( '2001', '2010', ),
( 'past', '2010' ),
( None, '2010', ),
( 'future', '2009', ),
( '2009', None, ),
( '2002', '2009', ),
( '2001', '2009', ),
( 'past', '2009' ),
( None, '2009', ),
( '2002', None, ),
( '2001', None, ),
( 'future', 'past' ), # <-- this is nonsensical
( '2010', 'past' ),
( '2009', 'past' ),
( 'past', 'past' ),
( None, 'past' ),
( 'past', None ),
( None, None, ),
]
# create the positions, store the config in the notes and create a list to compare against
position_expected_order = []
positions_to_save = []
def approx_date_from_entry(entry):
if entry is None:
return None
if entry == 'future':
return ApproximateDate(future=True)
if entry == 'past':
return ApproximateDate(past=True)
return ApproximateDate(year=int(entry))
for dates in position_dates:
note = u"%s -> %s" % dates
start_date = approx_date_from_entry(dates[0])
end_date = approx_date_from_entry(dates[1])
position_expected_order.append( note )
positions_to_save.append(
models.Position(
start_date= start_date,
end_date = end_date,
note = note,
person = self.person,
)
)
# save all the positions, but shuffle them first
random.shuffle( positions_to_save )
for position in positions_to_save:
position.save()
# get all the positions from db and check that they are sorted correctly
positions_from_db = self.person.position_set.all()
position_actual_order = [ p.note for p in positions_from_db ]
# print
# print position_actual_order
# print
# print position_expected_order
# print
self.maxDiff = None
self.assertEqual( position_expected_order, position_actual_order )
def test_have_at_least_one_attribute(self):
"""
Positions must have a person, and at least one more attribute.
Otherwise they don't mean anything
"""
pos = models.Position(
person = self.person,
)
# call this manually so that the validation does not get all confused
# about it
pos._set_sorting_dates()
with self.assertRaises(exceptions.ValidationError):
pos.full_clean()
# If this does not blow up then it is OK
pos.organisation = self.organisation
pos.full_clean()
def test_place_is_required(self):
"""
Some job titles (like an MP) are meaningless if there is no place
associated with them.
"""
# create position with no place
position = models.Position(
person = self.person,
title = self.title,
)
position._set_sorting_dates()
position.full_clean()
# Change the title to require a place
self.title.requires_place = True
with self.assertRaises(exceptions.ValidationError):
position.full_clean()
# give the pos a place and check that it now validates
position.place = models.Place( name='Test Place', slug='test-place')
position.full_clean()
# put it back
self.title.requires_place = False
def test_position_identifier(self):
position = models.Position.objects.create(
person = self.person,
title = self.title,
organisation = self.organisation)
self.id_a = models.Identifier.objects.create(
identifier="/positions/1",
scheme="org.mysociety.za",
object_id=position.id,
content_type=ContentType.objects.get_for_model(models.Position))
position_mysociety_id = position.get_identifier('org.mysociety.za')
self.assertEqual(position_mysociety_id, '/positions/1')
self.id_a.delete()
position.delete()
def test_currently_active(self):
"""Test that the currently active filter warks"""
now = datetime.datetime.now()
earlier = now - datetime.timedelta( days = 7 )
much_earlier = now - datetime.timedelta( days = 365 )
later = now + datetime.timedelta( days = 7 )
much_later = now + datetime.timedelta( days = 365 )
pos_qs = models.Position.objects.all()
# check that there are no positions
self.assertEqual(
models.Position.objects.all().currently_active().count(), 0
)
# create position which is currently active
position = models.Position.objects.create(
person = self.person,
title = self.title,
start_date = ApproximateDate( year=earlier.year, month=earlier.month, day=earlier.day ),
end_date = ApproximateDate( year=later.year, month=later.month, day=later.day ),
)
# check that we match by default
self.assertEqual( pos_qs.currently_active().count(), 1 )
self.assertEqual( pos_qs.currently_inactive().count(), 0 )
# check valid date ranges
self.assertEqual( pos_qs.currently_active( earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_active( now ).count(), 1 )
self.assertEqual( pos_qs.currently_active( later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( now ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( later ).count(), 0 )
# check that much earlier or much later don't match
self.assertEqual( pos_qs.currently_active( much_earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_active( much_later ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( much_earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_later ).count(), 1 )
# check future dates
position.start_date = ApproximateDate( year=earlier.year, month=earlier.month, day=earlier.day )
position.end_date = ApproximateDate(future=True)
position.save()
# check that we match by default
self.assertEqual( pos_qs.currently_active().count(), 1 )
self.assertEqual( pos_qs.currently_inactive().count(), 0 )
# check valid date ranges
self.assertEqual( pos_qs.currently_active( earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_active( now ).count(), 1 )
self.assertEqual( pos_qs.currently_active( later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( now ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( later ).count(), 0 )
# check that much earlier or much later don't match
self.assertEqual( pos_qs.currently_active( much_earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_active( much_later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_later ).count(), 0 )
# check absent end dates
position.start_date = ApproximateDate( year=earlier.year, month=earlier.month, day=earlier.day )
position.end_date = None
position.save()
# check that we match by default
self.assertEqual( pos_qs.currently_active().count(), 1 )
self.assertEqual( pos_qs.currently_inactive().count(), 0 )
# check valid date ranges
self.assertEqual( pos_qs.currently_active( earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_active( now ).count(), 1 )
self.assertEqual( pos_qs.currently_active( later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( now ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( later ).count(), 0 )
# check that much earlier or much later don't match
self.assertEqual( pos_qs.currently_active( much_earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_active( much_later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_later ).count(), 0 )
# check absent start and end dates
position.start_date = None
position.end_date = None
position.save()
# check that we match by default
self.assertEqual( pos_qs.currently_active().count(), 1 )
self.assertEqual( pos_qs.currently_inactive().count(), 0 )
# check valid date ranges
self.assertEqual( pos_qs.currently_active( earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_active( now ).count(), 1 )
self.assertEqual( pos_qs.currently_active( later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( now ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( later ).count(), 0 )
# check that much earlier or much later don't match
self.assertEqual( pos_qs.currently_active( much_earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_active( much_later ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( much_later ).count(), 0 )
# check future start dates
position.start_date = ApproximateDate(future=1)
position.end_date = None
position.save()
# check that we match by default
self.assertEqual( pos_qs.currently_active().count(), 0 )
self.assertEqual( pos_qs.currently_inactive().count(), 1 )
# check valid date ranges
self.assertEqual( pos_qs.currently_active( earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_active( now ).count(), 0 )
self.assertEqual( pos_qs.currently_active( later ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( now ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( later ).count(), 1 )
# check that much earlier or much later don't match
self.assertEqual( pos_qs.currently_active( much_earlier ).count(), 0 )
self.assertEqual( pos_qs.currently_active( much_later ).count(), 0 )
self.assertEqual( pos_qs.currently_inactive( much_earlier ).count(), 1 )
self.assertEqual( pos_qs.currently_inactive( much_later ).count(), 1 )
# check partial dates
mid_2010 = datetime.date(year=2010, month=6, day=1)
mid_2011 = datetime.date(year=2011, month=6, day=1)
mid_2012 = datetime.date(year=2012, month=6, day=1)
mid_2013 = datetime.date(year=2013, month=6, day=1)
position.start_date = ApproximateDate(year=2011)
position.end_date = ApproximateDate(year=2012)
position.save()
# from django.forms.models import model_to_dict
# from pprint import pprint
# pprint( model_to_dict( position ) )
self.assertEqual( pos_qs.currently_active(mid_2010).count(), 0 )
self.assertEqual( pos_qs.currently_active(mid_2011).count(), 1 )
self.assertEqual( pos_qs.currently_active(mid_2012).count(), 1 )
self.assertEqual( pos_qs.currently_active(mid_2013).count(), 0 )
self.assertEqual( pos_qs.currently_inactive(mid_2010).count(), 1 )
self.assertEqual( pos_qs.currently_inactive(mid_2011).count(), 0 )
self.assertEqual( pos_qs.currently_inactive(mid_2012).count(), 0 )
self.assertEqual( pos_qs.currently_inactive(mid_2013).count(), 1 )
| agpl-3.0 |
havard024/prego | crm/lib/python2.7/site-packages/docutils/transforms/components.py | 196 | 1993 | # $Id: components.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):

    """
    Include or exclude elements which depend on a specific Docutils component.

    For use with `nodes.pending` elements. A "pending" element's dictionary
    attribute ``details`` must contain the keys "component" and "format". The
    value of ``details['component']`` must match the type name of the
    component the elements depend on (e.g. "writer"). The value of
    ``details['format']`` is the name of a specific format or context of that
    component (e.g. "html"). If the matching Docutils component supports that
    format or context, the "pending" element is replaced by the contents of
    ``details['nodes']`` (a list of nodes); otherwise, the "pending" element
    is removed.

    For example, the reStructuredText "meta" directive creates a "pending"
    element containing a "meta" element (in ``pending.details['nodes']``).
    Only writers (``pending.details['component'] == 'writer'``) supporting the
    "html" format (``pending.details['format'] == 'html'``) will include the
    "meta" element; it will be deleted from the output of all other writers.
    """

    default_priority = 780

    def apply(self):
        """Replace or drop the pending node depending on component support."""
        pending = self.startnode
        component_type = pending.details['component'] # 'reader' or 'writer'
        # Renamed from `format`: don't shadow the builtin of the same name.
        fmt = pending.details['format']
        component = self.document.transformer.components[component_type]
        if component.supports(fmt):
            # Component supports the format: splice in the stored nodes.
            pending.replace_self(pending.details['nodes'])
        else:
            # Unsupported: the pending element is dropped entirely.
            pending.parent.remove(pending)
| mit |
Matt-Deacalion/django | tests/model_forms/models.py | 135 | 14126 | """
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import datetime
import os
import tempfile
import uuid
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import range
# Uploaded test files go into a throwaway directory; the test suite is
# expected to clean it up (mkdtemp itself never deletes it).
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
# Integer-keyed choices used by Article.status.
ARTICLE_STATUS = (
    (1, 'Draft'),
    (2, 'Pending'),
    (3, 'Live'),
)
# Char-keyed variant used by ArticleStatus.status.
ARTICLE_STATUS_CHAR = (
    ('d', 'Draft'),
    ('p', 'Pending'),
    ('l', 'Live'),
)
# Minimal single-field model.
class Person(models.Model):
    name = models.CharField(max_length=100)
# Category: exercises verbose_name ('The URL') and slug fields.
@python_2_unicode_compatible
class Category(models.Model):
    name = models.CharField(max_length=20)
    slug = models.SlugField(max_length=20)
    url = models.CharField('The URL', max_length=40)
    def __str__(self):
        return self.name
    def __repr__(self):
        # repr mirrors str so queryset reprs in doctest-style output are stable.
        return self.__str__()
@python_2_unicode_compatible
class Writer(models.Model):
    name = models.CharField(max_length=50, help_text='Use both first and last names.')
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=50)
    slug = models.SlugField()
    pub_date = models.DateField()
    # Non-editable: populated automatically on first save (see save()).
    created = models.DateField(editable=False)
    writer = models.ForeignKey(Writer, models.CASCADE)
    article = models.TextField()
    categories = models.ManyToManyField(Category, blank=True)
    status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
    def save(self, *args, **kwargs):
        # Stamp the creation date only on the initial insert.
        if not self.id:
            self.created = datetime.date.today()
        return super(Article, self).save(*args, **kwargs)
    def __str__(self):
        return self.headline
# One-to-one without and with parent_link, for ModelForm field coverage.
class ImprovedArticle(models.Model):
    article = models.OneToOneField(Article, models.CASCADE)
class ImprovedArticleWithParentLink(models.Model):
    article = models.OneToOneField(Article, models.CASCADE, parent_link=True)
# Multi-table inheritance child of Writer.
class BetterWriter(Writer):
    score = models.IntegerField()
@python_2_unicode_compatible
class Publication(models.Model):
    title = models.CharField(max_length=30)
    date_published = models.DateField()
    def __str__(self):
        return self.title
def default_mode():
    """Default-value callable for PublicationDefaults.mode ('di' == direct)."""
    return 'di'
def default_category():
    """Default-value callable for PublicationDefaults.category (3 == Novel)."""
    return 3
# Exercises callable and literal field defaults in ModelForm rendering.
class PublicationDefaults(models.Model):
    MODE_CHOICES = (('di', 'direct'), ('de', 'delayed'))
    CATEGORY_CHOICES = ((1, 'Games'), (2, 'Comics'), (3, 'Novel'))
    title = models.CharField(max_length=30)
    date_published = models.DateField(default=datetime.date.today)
    mode = models.CharField(max_length=2, choices=MODE_CHOICES, default=default_mode)
    category = models.IntegerField(choices=CATEGORY_CHOICES, default=default_category)
# Nullable vs non-nullable OneToOne variants.
class Author(models.Model):
    publication = models.OneToOneField(Publication, models.SET_NULL, null=True, blank=True)
    full_name = models.CharField(max_length=255)
class Author1(models.Model):
    publication = models.OneToOneField(Publication, models.SET_NULL, null=False)
    full_name = models.CharField(max_length=255)
# OneToOne used as the primary key.
@python_2_unicode_compatible
class WriterProfile(models.Model):
    writer = models.OneToOneField(Writer, models.CASCADE, primary_key=True)
    age = models.PositiveIntegerField()
    def __str__(self):
        return "%s is %s" % (self.writer, self.age)
class Document(models.Model):
    myfile = models.FileField(upload_to='unused', blank=True)
@python_2_unicode_compatible
class TextFile(models.Model):
    description = models.CharField(max_length=20)
    # max_length=15 deliberately small to exercise filename-length validation.
    file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
    def __str__(self):
        return self.description
class CustomFileField(models.FileField):
    def save_form_data(self, instance, data):
        # Guard: the forms framework must call save_form_data exactly once.
        been_here = getattr(self, 'been_saved', False)
        assert not been_here, "save_form_data called more than once"
        setattr(self, 'been_saved', True)
class CustomFF(models.Model):
    f = CustomFileField(upload_to='unused', blank=True)
class FilePathModel(models.Model):
    # Raw string for the regex: in a plain literal "\." only worked because
    # "\." is not a recognised escape, and it raises a DeprecationWarning on
    # Python 3.6+. r".*\.py$" is byte-identical and explicit.
    path = models.FilePathField(path=os.path.dirname(upath(__file__)), match=r".*\.py$", blank=True)
# Image-backed models are only defined when Pillow is importable; tests that
# need them check the `test_images` flag.
try:
    from PIL import Image # NOQA: detect if Pillow is installed
    test_images = True
    @python_2_unicode_compatible
    class ImageFile(models.Model):
        def custom_upload_path(self, filename):
            # Store under self.path when set, otherwise under 'tests'.
            path = self.path or 'tests'
            return '%s/%s' % (path, filename)
        description = models.CharField(max_length=20)
        # Deliberately put the image field *after* the width/height fields to
        # trigger the bug in #10404 with width/height not getting assigned.
        width = models.IntegerField(editable=False)
        height = models.IntegerField(editable=False)
        image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
                                  width_field='width', height_field='height')
        path = models.CharField(max_length=16, blank=True, default='')
        def __str__(self):
            return self.description
    @python_2_unicode_compatible
    class OptionalImageFile(models.Model):
        def custom_upload_path(self, filename):
            path = self.path or 'tests'
            return '%s/%s' % (path, filename)
        description = models.CharField(max_length=20)
        # Same as ImageFile but nullable, with nullable dimension fields.
        image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
                                  width_field='width', height_field='height',
                                  blank=True, null=True)
        width = models.IntegerField(editable=False, null=True)
        height = models.IntegerField(editable=False, null=True)
        path = models.CharField(max_length=16, blank=True, default='')
        def __str__(self):
            return self.description
except ImportError:
    test_images = False
# NOTE(review): CommaSeparatedIntegerField is deprecated in later Django
# versions in favour of CharField+validators — kept here as-is for coverage.
@python_2_unicode_compatible
class CommaSeparatedInteger(models.Model):
    field = models.CommaSeparatedIntegerField(max_length=20)
    def __str__(self):
        return self.field
class Homepage(models.Model):
    url = models.URLField()
@python_2_unicode_compatible
class Product(models.Model):
    slug = models.SlugField(unique=True)
    def __str__(self):
        return self.slug
# unique_together across two fields.
@python_2_unicode_compatible
class Price(models.Model):
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()
    def __str__(self):
        return "%s for %s" % (self.quantity, self.price)
    class Meta:
        unique_together = (('price', 'quantity'),)
# Two overlapping unique_together constraints.
class Triple(models.Model):
    left = models.IntegerField()
    middle = models.IntegerField()
    right = models.IntegerField()
    class Meta:
        unique_together = (('left', 'middle'), ('middle', 'right'))
class ArticleStatus(models.Model):
    status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
# Self-referential FK targeting a non-pk field (to_field='barcode').
@python_2_unicode_compatible
class Inventory(models.Model):
    barcode = models.PositiveIntegerField(unique=True)
    parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True)
    name = models.CharField(blank=False, max_length=20)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
    def __repr__(self):
        return self.__str__()
class Book(models.Model):
    title = models.CharField(max_length=40)
    author = models.ForeignKey(Writer, models.SET_NULL, blank=True, null=True)
    special_id = models.IntegerField(blank=True, null=True, unique=True)
    class Meta:
        unique_together = ('title', 'author')
class BookXtra(models.Model):
    isbn = models.CharField(max_length=16, unique=True)
    suffix1 = models.IntegerField(blank=True, default=0)
    suffix2 = models.IntegerField(blank=True, default=0)
    class Meta:
        # Single-tuple form; Django normalises it to (('suffix1', 'suffix2'),).
        unique_together = (('suffix1', 'suffix2'))
        abstract = True
# Concrete child combining Book (MTI) and abstract BookXtra.
class DerivedBook(Book, BookXtra):
    pass
@python_2_unicode_compatible
class ExplicitPK(models.Model):
    key = models.CharField(max_length=20, primary_key=True)
    desc = models.CharField(max_length=20, blank=True, unique=True)
    class Meta:
        unique_together = ('key', 'desc')
    def __str__(self):
        return self.key
# unique_for_date / unique_for_year / unique_for_month coverage.
@python_2_unicode_compatible
class Post(models.Model):
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()
    def __str__(self):
        return self.title
# Same constraints, but against a DateTimeField.
@python_2_unicode_compatible
class DateTimePost(models.Model):
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateTimeField(editable=False)
    def __str__(self):
        return self.title
class DerivedPost(Post):
    pass
@python_2_unicode_compatible
class BigInt(models.Model):
    biggie = models.BigIntegerField()
    def __str__(self):
        return six.text_type(self.biggie)
class MarkupField(models.CharField):
    def __init__(self, *args, **kwargs):
        # Fixed length regardless of what the caller passes.
        kwargs["max_length"] = 20
        super(MarkupField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        # don't allow this field to be used in form (real use-case might be
        # that you know the markup will always be X, but it is among an app
        # that allows the user to say it could be something else)
        # regressed at r10062
        return None
class CustomFieldForExclusionModel(models.Model):
    name = models.CharField(max_length=10)
    markup = MarkupField()
# Like Post, but with a nullable date target for the unique_for_* checks.
class FlexibleDatePost(models.Model):
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField(blank=True, null=True)
@python_2_unicode_compatible
class Colour(models.Model):
    name = models.CharField(max_length=50)
    def __iter__(self):
        # Deliberately odd: instances are iterable, yielding 0..4, to make
        # sure form machinery doesn't mistake model instances for sequences.
        for number in range(5):
            yield number
    def __str__(self):
        return self.name
class ColourfulItem(models.Model):
    name = models.CharField(max_length=50)
    colours = models.ManyToManyField(Colour)
# Field-level and model-level custom validation error messages.
class CustomErrorMessage(models.Model):
    name1 = models.CharField(max_length=50,
        validators=[validators.validate_slug],
        error_messages={'invalid': 'Model custom error message.'})
    name2 = models.CharField(max_length=50,
        validators=[validators.validate_slug],
        error_messages={'invalid': 'Model custom error message.'})
    def clean(self):
        # Magic sentinel values trigger the three ValidationError shapes.
        if self.name1 == 'FORBIDDEN_VALUE':
            raise ValidationError({'name1': [ValidationError('Model.clean() error messages.')]})
        elif self.name1 == 'FORBIDDEN_VALUE2':
            raise ValidationError({'name1': 'Model.clean() error messages (simpler syntax).'})
        elif self.name1 == 'GLOBAL_ERROR':
            raise ValidationError("Global error message.")
def today_callable_dict():
    """limit_choices_to callable (dict form): characters active from today on."""
    cutoff = datetime.datetime.today()
    return {"last_action__gte": cutoff}
def today_callable_q():
    # Same restriction as today_callable_dict, expressed as a Q object.
    return models.Q(last_action__gte=datetime.datetime.today())
class Character(models.Model):
    username = models.CharField(max_length=100)
    last_action = models.DateTimeField()
# Callable limit_choices_to in both dict and Q form.
class StumpJoke(models.Model):
    most_recently_fooled = models.ForeignKey(
        Character,
        models.CASCADE,
        limit_choices_to=today_callable_dict,
        related_name="+",
    )
    has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+")
# Model for #13776
class Student(models.Model):
    character = models.ForeignKey(Character, models.CASCADE)
    study = models.CharField(max_length=30)
# Model for #639
class Photo(models.Model):
    title = models.CharField(max_length=30)
    image = models.FileField(storage=temp_storage, upload_to='tests')
    # Support code for the tests; this keeps track of how many times save()
    # gets called on each instance.
    def __init__(self, *args, **kwargs):
        super(Photo, self).__init__(*args, **kwargs)
        self._savecount = 0
    def save(self, force_insert=False, force_update=False):
        # NOTE(review): narrower signature than Model.save (no using/
        # update_fields) — intentional for these tests, but verify callers.
        super(Photo, self).save(force_insert, force_update)
        self._savecount += 1
class UUIDPK(models.Model):
    uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=30)
# Models for #24706
class StrictAssignmentFieldSpecific(models.Model):
    title = models.CharField(max_length=30)
    # Class-level flag; tests flip it on an instance to make assignment raise.
    _should_error = False
    def __setattr__(self, key, value):
        if self._should_error is True:
            # Field-specific message dict, keyed by the attribute being set.
            raise ValidationError(message={key: "Cannot set attribute"}, code='invalid')
        super(StrictAssignmentFieldSpecific, self).__setattr__(key, value)
class StrictAssignmentAll(models.Model):
    title = models.CharField(max_length=30)
    _should_error = False
    def __setattr__(self, key, value):
        if self._should_error is True:
            # Non-field-specific (global) message variant.
            raise ValidationError(message="Cannot set attribute", code='invalid')
        super(StrictAssignmentAll, self).__setattr__(key, value)
| bsd-3-clause |
destinokm/DesHabitat | backend/articles/migrations/0001_initial.py | 1 | 1937 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-26 07:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration for the articles app: creates the article, author_info
    # and comment tables (explicit db_table names).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('tags', models.CharField(max_length=128)),
                ('content', models.TextField()),
                ('imgUrl', models.URLField()),
                ('views', models.PositiveIntegerField()),
                ('likes', models.IntegerField()),
                ('createTime', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'db_table': 'article',
            },
        ),
        migrations.CreateModel(
            name='AuthorInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('introduce', models.CharField(max_length=512)),
                ('tags', models.CharField(max_length=512)),
                ('imgUrl', models.URLField()),
            ],
            options={
                'db_table': 'author_info',
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=256)),
                ('createTime', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'db_table': 'comment',
            },
        ),
    ]
| gpl-3.0 |
butterscotchstallion/SpiffyRPG | SpiffyRPG/SpiffyWorld/db.py | 1 | 1041 | # -*- coding: utf-8 -*-
import sqlite3 as lite
import os
class Database:
    """
    Thin wrapper around a sqlite3 connection.

    The database path comes from the ``path`` keyword argument or, failing
    that, from the SPIFFYDB_PATH environment variable.

    :raises ValueError: if an explicit ``path`` kwarg is falsy.
    :raises RuntimeError: if no usable path could be determined.
    """
    def __init__(self, **kwargs):
        # Optional logger; None disables logging.
        self.log = kwargs.get("log")
        if "path" in kwargs:
            path = kwargs["path"]
            # `not path` already covers None; the old `or path is None` was redundant.
            if not path:
                raise ValueError("Database: path is invalid")
            self.path = path
        else:
            # os.environ.get never raises KeyError, so no try/except is needed
            # (the previous `except KeyError: pass` was dead code).
            self.path = os.environ.get("SPIFFYDB_PATH")
        if not isinstance(self.path, str) or not self.path:
            raise RuntimeError("Database path not found: %s" % self.path)
        # check_same_thread=False: the connection is shared across threads.
        connection = lite.connect(self.path, check_same_thread=False)
        # Row factory gives dict-like access to result columns.
        connection.row_factory = lite.Row
        if self.log is not None:
            self.log.info("SpiffyWorld: initialized db path %s" % self.path)
        self.connection = connection
    def get_connection(self):
        """Return the shared sqlite3 connection."""
        return self.connection
| mit |
afeyrer/Final-Project | ggame/sysdeps.py | 227 | 1916 | def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
# Platform dispatch: Brython in-browser backend first, then pygame, then a
# headless stub. Only one branch's names are ever defined.
if module_exists('browser') and module_exists('javascript'):
    # Brython: bridge to the Pixi.js graphics and buzz.js sound JS libraries.
    from browser import window, document
    from javascript import JSObject, JSConstructor
    GFX = JSObject(window.PIXI)
    GFX_Rectangle = JSConstructor(GFX.Rectangle)
    GFX_Texture = JSConstructor(GFX.Texture)
    GFX_Texture_fromImage = JSConstructor(GFX.Texture.fromImage)
    GFX_Sprite = JSConstructor(GFX.Sprite)
    GFX_Graphics = JSConstructor(GFX.Graphics)()
    GFX_Text = JSConstructor(GFX.Text)
    GFX_DetectRenderer = GFX.autoDetectRenderer
    SND = JSObject(window.buzz)
    SND_Sound = JSConstructor(SND.sound)
    class GFX_Window(object):
        def __init__(self, width, height, onclose):
            # Open a new browser window and attach a Pixi renderer to it.
            self._w = window.open("", "")
            self._stage = JSConstructor(GFX.Container)()
            # 0 means "90% of the browser window" in either dimension.
            self.width = width if width != 0 else int(window.innerWidth * 0.9)
            self.height = height if height != 0 else int(window.innerHeight * 0.9)
            self._renderer = GFX.autoDetectRenderer(self.width, self.height, {'transparent':True})
            self._w.document.body.appendChild(self._renderer.view)
            self._w.onunload = onclose
        def bind(self, evtspec, callback):
            self._w.document.body.bind(evtspec, callback)
        def add(self, obj):
            self._stage.addChild(obj)
        def remove(self, obj):
            self._stage.removeChild(obj)
        def animate(self, stepcallback):
            # Render, then schedule the next frame via requestAnimationFrame.
            self._renderer.render(self._stage)
            self._w.requestAnimationFrame(stepcallback)
        def destroy(self):
            # Stop all sounds before tearing the stage down.
            SND.all().stop()
            self._stage.destroy()
elif module_exists('pygame'):
    # Desktop backend; bare fallback import supports running outside a package.
    try:
        from ggame.pygamedeps import *
    except:
        from pygamedeps import *
else:
    # No graphics stack at all: headless stubs (e.g. for CI).
    try:
        from ggame.headlessdeps import *
    except:
        from headlessdeps import *
| mit |
learningequality/kolibri | kolibri/core/device/test/test_api.py | 2 | 18219 | import os
import platform
import sys
from collections import namedtuple
import mock
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from mock import patch
from morango.models import DatabaseIDModel
from morango.models import InstanceIDModel
from rest_framework import status
from rest_framework.test import APITestCase
import kolibri
from kolibri.core.auth.constants.role_kinds import ADMIN
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityDataset
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import Role
from kolibri.core.auth.test.helpers import clear_process_cache
from kolibri.core.auth.test.helpers import create_superuser
from kolibri.core.auth.test.helpers import provision_device
from kolibri.core.auth.test.test_api import FacilityFactory
from kolibri.core.auth.test.test_api import FacilityUserFactory
from kolibri.core.device.models import DevicePermissions
from kolibri.core.device.models import DeviceSettings
# Plaintext password shared by every test user created via the factories below.
DUMMY_PASSWORD = "password"
class DeviceProvisionTestCase(APITestCase):
    """Tests for the one-time device provisioning endpoint, which creates the
    facility, its dataset settings, the initial superuser (plus device
    permissions) and the DeviceSettings singleton in a single POST."""
    def setUp(self):
        clear_process_cache()
    # Canonical provisioning payload pieces; individual tests copy and tweak.
    superuser_data = {"username": "superuser", "password": "password"}
    facility_data = {"name": "Wilson Elementary"}
    preset_data = "nonformal"
    dataset_data = {
        "learner_can_edit_username": True,
        "learner_can_edit_name": True,
        "learner_can_edit_password": True,
        "learner_can_sign_up": True,
        "learner_can_delete_account": True,
        "learner_can_login_with_no_password": False,
    }
    settings = {}
    allow_guest_access = True
    language_id = "en"
    def _default_provision_data(self):
        # Full payload accepted by the deviceprovision endpoint.
        return {
            "device_name": None,
            "superuser": self.superuser_data,
            "facility": self.facility_data,
            "preset": self.preset_data,
            "settings": self.settings,
            "language_id": self.language_id,
            "allow_guest_access": self.allow_guest_access,
        }
    def _post_deviceprovision(self, data):
        return self.client.post(
            reverse("kolibri:core:deviceprovision"), data, format="json"
        )
    def test_personal_setup_defaults(self):
        # The "informal" (personal) preset with no explicit settings should
        # fall back to permissive dataset defaults.
        data = self._default_provision_data()
        data["preset"] = "informal"
        # Client should pass an empty Dict for settings
        data["settings"] = {}
        self._post_deviceprovision(data)
        settings = FacilityDataset.objects.get()
        self.assertEqual(settings.learner_can_edit_username, True)
        self.assertEqual(settings.learner_can_edit_name, True)
        self.assertEqual(settings.learner_can_edit_password, True)
        self.assertEqual(settings.learner_can_sign_up, True)
        self.assertEqual(settings.learner_can_delete_account, True)
        self.assertEqual(settings.learner_can_login_with_no_password, False)
        self.assertEqual(settings.show_download_button_in_learn, True)
        device_settings = DeviceSettings.objects.get()
        self.assertEqual(device_settings.allow_guest_access, True)
    def test_cannot_post_if_provisioned(self):
        # Provisioning is one-shot: a second attempt must be rejected.
        provision_device()
        data = self._default_provision_data()
        response = self._post_deviceprovision(data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_superuser_created(self):
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertEqual(
            FacilityUser.objects.get().username, self.superuser_data["username"]
        )
    def test_superuser_password_set_correctly(self):
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertTrue(
            FacilityUser.objects.get().check_password(self.superuser_data["password"])
        )
    def test_superuser_device_permissions_created(self):
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertEqual(
            DevicePermissions.objects.get(),
            FacilityUser.objects.get().devicepermissions,
        )
    def test_facility_created(self):
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertEqual(Facility.objects.get().name, self.facility_data["name"])
    def test_admin_role_created(self):
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertEqual(Role.objects.get().kind, ADMIN)
    def test_facility_role_created(self):
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertEqual(Role.objects.get().collection.name, self.facility_data["name"])
    def test_dataset_set_created(self):
        # Every dataset flag posted should be persisted verbatim.
        data = self._default_provision_data()
        self._post_deviceprovision(data)
        self.assertEqual(
            FacilityDataset.objects.get().learner_can_edit_username,
            self.dataset_data["learner_can_edit_username"],
        )
        self.assertEqual(
            FacilityDataset.objects.get().learner_can_edit_name,
            self.dataset_data["learner_can_edit_name"],
        )
        self.assertEqual(
            FacilityDataset.objects.get().learner_can_edit_password,
            self.dataset_data["learner_can_edit_password"],
        )
        self.assertEqual(
            FacilityDataset.objects.get().learner_can_sign_up,
            self.dataset_data["learner_can_sign_up"],
        )
        self.assertEqual(
            FacilityDataset.objects.get().learner_can_delete_account,
            self.dataset_data["learner_can_delete_account"],
        )
        self.assertEqual(
            FacilityDataset.objects.get().learner_can_login_with_no_password,
            self.dataset_data["learner_can_login_with_no_password"],
        )
    def test_device_settings_created(self):
        data = self._default_provision_data()
        self.assertEqual(DeviceSettings.objects.count(), 0)
        self._post_deviceprovision(data)
        self.assertEqual(DeviceSettings.objects.count(), 1)
    def test_device_settings_values(self):
        data = self._default_provision_data()
        data["allow_guest_access"] = False
        self._post_deviceprovision(data)
        device_settings = DeviceSettings.objects.get()
        self.assertEqual(device_settings.default_facility, Facility.objects.get())
        self.assertFalse(device_settings.allow_guest_access)
        self.assertFalse(device_settings.allow_peer_unlisted_channel_import)
        self.assertTrue(device_settings.allow_learner_unassigned_resource_access)
class DeviceSettingsTestCase(APITestCase):
    """Exercise the read/update API for the singleton DeviceSettings model."""

    @classmethod
    def setUpTestData(cls):
        # Payload that flips every provisioned default (see setUpTestData's
        # provision_device call) so test_patch can verify each field changed.
        cls.settings = {
            "language_id": "en",
            "allow_guest_access": False,
            "allow_peer_unlisted_channel_import": True,
            "allow_learner_unassigned_resource_access": False,
        }
        cls.facility = FacilityFactory.create()
        provision_device(language_id="es", default_facility=cls.facility)
        cls.superuser = create_superuser(cls.facility)
        cls.user = FacilityUserFactory.create(facility=cls.facility)

    def setUp(self):
        super(DeviceSettingsTestCase, self).setUp()
        self.client.login(
            username=self.superuser.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )

    def _request(self, method):
        # Issue `method` ("post"/"put"/"patch") against the devicesettings
        # endpoint with the canonical settings payload.
        handler = getattr(self.client, method)
        return handler(
            reverse("kolibri:core:devicesettings"), self.settings, format="json"
        )

    def test_requires_authentication(self):
        self.client.logout()
        self.assertEqual(
            self._request("post").status_code, status.HTTP_403_FORBIDDEN
        )

    def test_cannot_post(self):
        self.assertEqual(
            self._request("post").status_code, status.HTTP_405_METHOD_NOT_ALLOWED
        )

    def test_cannot_put(self):
        self.assertEqual(
            self._request("put").status_code, status.HTTP_405_METHOD_NOT_ALLOWED
        )

    def test_patch(self):
        device_settings = DeviceSettings.objects.get()
        # Provisioned defaults before the patch is applied.
        self.assertEqual("es", device_settings.language_id)
        self.assertTrue(device_settings.allow_guest_access)
        self.assertFalse(device_settings.allow_peer_unlisted_channel_import)
        self.assertTrue(device_settings.allow_learner_unassigned_resource_access)
        self._request("patch")
        device_settings.refresh_from_db()
        # Every field should now reflect the patched payload.
        self.assertEqual("en", device_settings.language_id)
        self.assertFalse(device_settings.allow_guest_access)
        self.assertTrue(device_settings.allow_peer_unlisted_channel_import)
        self.assertFalse(device_settings.allow_learner_unassigned_resource_access)
class DevicePermissionsTestCase(APITestCase):
    """Verify a superuser cannot delete or demote their own device permissions."""

    @classmethod
    def setUpTestData(cls):
        provision_device()
        cls.facility = FacilityFactory.create()
        cls.superuser = create_superuser(cls.facility)
        cls.user = FacilityUserFactory.create(facility=cls.facility)

    def setUp(self):
        self.client.login(
            username=self.superuser.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )

    def _own_permissions_url(self):
        # Detail endpoint for the logged-in superuser's own permissions record.
        return reverse(
            "kolibri:core:devicepermissions-detail",
            kwargs={"pk": self.superuser.devicepermissions.pk},
        )

    def test_superuser_delete_own_permissions(self):
        response = self.client.delete(self._own_permissions_url(), format="json")
        self.assertEqual(response.status_code, 403)

    def test_superuser_update_own_permissions(self):
        response = self.client.patch(
            self._own_permissions_url(), {"is_superuser": False}, format="json"
        )
        self.assertEqual(response.status_code, 403)
class FreeSpaceTestCase(APITestCase):
    """Tests for the freespace endpoint, with the underlying OS calls mocked
    per-platform (os.statvfs on POSIX, kernel32.GetDiskFreeSpaceExW on
    Windows)."""
    def setUp(self):
        provision_device()
        self.facility = FacilityFactory.create()
        self.superuser = create_superuser(self.facility)
        self.user = FacilityUserFactory.create(facility=self.facility)
        self.client.login(
            username=self.superuser.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )
    def test_posix_freespace(self):
        # Only runs off Windows; statvfs is mocked so no real disk is touched.
        if not sys.platform.startswith("win"):
            with mock.patch("kolibri.utils.system.os.statvfs") as os_statvfs_mock:
                statvfs_result = namedtuple("statvfs_result", ["f_frsize", "f_bavail"])
                os_statvfs_mock.return_value = statvfs_result(f_frsize=1, f_bavail=2)
                response = self.client.get(
                    reverse("kolibri:core:freespace"), {"path": "test"}
                )
                os_statvfs_mock.assert_called_with(os.path.realpath("test"))
                # free space = f_frsize * f_bavail = 1 * 2
                self.assertEqual(response.data, {"freespace": 2})
    def test_win_freespace_fail(self):
        # Windows only: a failing kernel32 call should surface via ctypes.winError.
        if sys.platform.startswith("win"):
            ctypes_mock = mock.MagicMock()
            with mock.patch.dict("sys.modules", ctypes=ctypes_mock):
                ctypes_mock.windll.kernel32.GetDiskFreeSpaceExW.return_value = 0
                ctypes_mock.winError.side_effect = OSError
                try:
                    self.client.get(reverse("kolibri:core:freespace"), {"path": "test"})
                except OSError:
                    # check if ctypes.winError() has been called
                    ctypes_mock.winError.assert_called_with()
class DeviceInfoTestCase(APITestCase):
    """Tests for the deviceinfo endpoint: reported fields and access control."""
    @classmethod
    def setUpTestData(cls):
        provision_device()
        DatabaseIDModel.objects.create()
        cls.facility = FacilityFactory.create()
        cls.superuser = create_superuser(cls.facility)
    def setUp(self):
        self.client.login(
            username=self.superuser.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )
    def test_has_version(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(response.data["version"], kolibri.__version__)
    def test_urls(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertFalse(len(response.data["urls"]) == 0)
        for url in response.data["urls"]:
            # Make sure each url is a valid link
            self.assertTrue(url.startswith("http://"))
    @patch(
        "kolibri.core.device.api.get_urls",
        return_value=(1, ["http://127.0.0.1:8000", "http://kolibri.com"]),
    )
    def test_no_localhost_urls_when_others_available(self, get_urls_mock):
        # Loopback addresses are filtered out when an external URL exists.
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(len(response.data["urls"]), 1)
        self.assertEqual(response.data["urls"][0], "http://kolibri.com")
    @patch(
        "kolibri.core.device.api.get_urls", return_value=(1, ["http://127.0.0.1:8000"])
    )
    def test_localhost_urls_when_no_others_available(self, get_urls_mock):
        # ...but loopback is kept when it is the only URL available.
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(len(response.data["urls"]), 1)
        self.assertEqual(response.data["urls"][0], "http://127.0.0.1:8000")
    def test_database_path(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        db_engine = settings.DATABASES["default"]["ENGINE"]
        db_path = response.data["database_path"]
        if db_engine.endswith("sqlite3"):
            self.assertEqual(db_path, settings.DATABASES["default"]["NAME"])
        elif db_engine.endswith("postgresql"):
            self.assertEqual(db_path, "postgresql")
        else:
            self.assertEqual(db_path, "unknown")
    def test_os(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(response.data["os"], platform.platform())
    def test_device_id(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(
            response.data["device_id"],
            InstanceIDModel.get_or_create_current_instance()[0].id,
        )
    def test_time_zone(self):
        # NOTE(review): assertTrue only checks truthiness of its first argument;
        # the second is treated as a message, so this never compares against
        # settings.TIME_ZONE. assertEqual may have been intended -- confirm.
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertTrue(response.data["server_timezone"], settings.TIME_ZONE)
    def test_free_space(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(type(response.data["content_storage_free_space"]), int)
    def test_superuser_permissions(self):
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(response.status_code, 200)
    def test_user_permissions(self):
        # A plain facility user without device permissions is denied.
        self.user = FacilityUserFactory.create(facility=self.facility)
        self.client.logout()
        self.client.login(
            username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility
        )
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(response.status_code, 403)
    def test_user_with_permissions(self):
        # Granting can_manage_content is sufficient to read device info.
        self.user = FacilityUserFactory.create(facility=self.facility)
        DevicePermissions.objects.create(user=self.user, can_manage_content=True)
        self.client.logout()
        self.client.login(
            username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility
        )
        response = self.client.get(reverse("kolibri:core:deviceinfo"), format="json")
        self.assertEqual(response.status_code, 200)
class DeviceNameTestCase(APITestCase):
    """Tests for reading and updating the device's display name."""
    @classmethod
    def setUpTestData(cls):
        cls.device_name = {"name": "test device"}
        cls.facility = FacilityFactory.create()
        provision_device(language_id="es", default_facility=cls.facility)
        cls.superuser = create_superuser(cls.facility)
        cls.user = FacilityUserFactory.create(facility=cls.facility)
    def setUp(self):
        super(DeviceNameTestCase, self).setUp()
        self.client.login(
            username=self.superuser.username,
            password=DUMMY_PASSWORD,
            facility=self.facility,
        )
    def test_requires_authentication(self):
        self.client.logout()
        response = self.client.post(
            reverse("kolibri:core:devicename"), self.device_name, format="json"
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_existing_device_name(self):
        # Before any rename, the device name defaults to the instance hostname.
        response = self.client.get(reverse("kolibri:core:devicename"))
        self.assertEqual(
            response.data["name"],
            InstanceIDModel.get_or_create_current_instance()[0].hostname,
        )
    def test_patch(self):
        device_settings = DeviceSettings.objects.get()
        self.assertEqual(
            device_settings.name,
            InstanceIDModel.get_or_create_current_instance()[0].hostname,
        )
        response = self.client.patch(
            reverse("kolibri:core:devicename"), self.device_name, format="json"
        )
        self.assertEqual(response.data, self.device_name)
        device_settings.refresh_from_db()
        # The patched name replaces the hostname-derived default.
        self.assertEqual(device_settings.name, self.device_name["name"])
        self.assertNotEqual(
            device_settings.name,
            InstanceIDModel.get_or_create_current_instance()[0].hostname,
        )
    def test_device_name_max_length(self):
        # Names longer than the model field's max_length must fail validation.
        with self.assertRaises(ValidationError):
            exceeds_max_length_name = {"name": "a" * 60}
            self.client.patch(
                reverse("kolibri:core:devicename"),
                exceeds_max_length_name,
                format="json",
            )
| mit |
yonglehou/spiderfoot | ext/stem/util/test_tools.py | 12 | 10810 | # Copyright 2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Helper functions for testing.
.. versionadded:: 1.2.0
::
clean_orphaned_pyc - delete *.pyc files without corresponding *.py
is_pyflakes_available - checks if pyflakes is available
is_pep8_available - checks if pep8 is available
stylistic_issues - checks for PEP8 and other stylistic issues
pyflakes_issues - static checks for problems via pyflakes
"""
import collections
import linecache
import os
import re
import stem.util.conf
import stem.util.system
# Runtime configuration drawn from the 'test' config (see stem.util.conf).
CONFIG = stem.util.conf.config_dict('test', {
  'pep8.ignore': [],  # pep8 codes to suppress (e.g. 'E501')
  'pyflakes.ignore': [],  # 'path => message' pairs to suppress
  'exclude_paths': [],  # regexes of absolute paths to skip entirely
})

# A single stylistic or static-analysis finding: where it is and what it says.
Issue = collections.namedtuple('Issue', [
  'line_number',
  'message',
  'line',
])
def clean_orphaned_pyc(paths):
  """
  Deletes any file with a *.pyc extension without a corresponding *.py. This
  helps to address a common gotcha when deleting python files...

  * You delete module 'foo.py' and run the tests to ensure that you haven't
    broken anything. They pass, however there *are* still some 'import foo'
    statements that still work because the bytecode (foo.pyc) is still around.

  * You push your change.

  * Another developer clones our repository and is confused because we have a
    bunch of ImportErrors.

  :param list paths: paths to search for orphaned pyc files

  :returns: list of absolute paths that were deleted
  """

  removed = []
  pycache = '%s__pycache__%s' % (os.path.sep, os.path.sep)

  for search_path in paths:
    for pyc_path in stem.util.system.files_with_suffix(search_path, '.pyc'):
      if pycache in pyc_path:
        # Under python 3 bytecode lives in a __pycache__ directory rather than
        # alongside the source, named like 'test_tools.cpython-32.pyc'.
        directory, pycache_filename = pyc_path.split(pycache, 1)

        if not pycache_filename.endswith('.pyc'):
          continue

        py_path = os.path.join(directory, pycache_filename.split('.')[0] + '.py')
      else:
        py_path = pyc_path[:-1]

      if not os.path.exists(py_path):
        removed.append(pyc_path)
        os.remove(pyc_path)

  return removed
def is_pyflakes_available():
  """
  Checks if pyflakes is available.

  :returns: **True** if we can use pyflakes and **False** otherwise
  """

  try:
    import pyflakes.api
    import pyflakes.reporter
  except ImportError:
    return False

  return True
def is_pep8_available():
  """
  Checks if pep8 is available.

  :returns: **True** if we can use pep8 and **False** otherwise
  """

  try:
    import pep8
  except ImportError:
    return False

  # Very old pep8 releases lack BaseReport, which we rely upon.
  return hasattr(pep8, 'BaseReport')
def stylistic_issues(paths, check_two_space_indents = False, check_newlines = False, check_trailing_whitespace = False, check_exception_keyword = False, prefer_single_quotes = False):
  """
  Checks for stylistic issues that are an issue according to the parts of PEP8
  we conform to. You can suppress PEP8 issues by making a 'test' configuration
  that sets 'pep8.ignore'.

  For example, with a 'test/settings.cfg' of...

  ::

    # PEP8 compliance issues that we're ignoring...
    #
    # * E111 and E121 four space indentations
    # * E501 line is over 79 characters

    pep8.ignore E111
    pep8.ignore E121
    pep8.ignore E501

  ... you can then run tests with...

  ::

    import stem.util.conf

    test_config = stem.util.conf.get_config('test')
    test_config.load('test/settings.cfg')

    issues = stylistic_issues('my_project')

  If a 'exclude_paths' was set in our test config then we exclude any absolute
  paths matching those regexes.

  .. versionchanged:: 1.3.0
     Renamed from get_stylistic_issues() to stylistic_issues(). The old name
     still works as an alias, but will be dropped in Stem version 2.0.0.

  .. versionchanged:: 1.4.0
     Changing tuples in return value to be namedtuple instances, and adding the
     line that had the issue.

  .. versionchanged:: 1.4.0
     Added the prefer_single_quotes option.

  :param list paths: paths to search for stylistic issues
  :param bool check_two_space_indents: check for two space indentations and
    that no tabs snuck in
  :param bool check_newlines: check that we have standard newlines (\\n), not
    windows (\\r\\n) nor classic mac (\\r)
  :param bool check_trailing_whitespace: check that our lines don't end with
    trailing whitespace
  :param bool check_exception_keyword: checks that we're using 'as' for
    exceptions rather than a comma
  :param bool prefer_single_quotes: standardize on using single rather than
    double quotes for strings, when reasonable

  :returns: **dict** of the form ``path => [(line_number, message)...]``
  """

  issues = {}

  if is_pep8_available():
    import pep8

    class StyleReport(pep8.BaseReport):
      def __init__(self, options):
        super(StyleReport, self).__init__(options)

      def error(self, line_number, offset, text, check):
        # error() returns the code when the issue isn't suppressed.
        code = super(StyleReport, self).error(line_number, offset, text, check)

        if code:
          issues.setdefault(self.filename, []).append(Issue(line_number, '%s %s' % (code, text), text))

    style_checker = pep8.StyleGuide(ignore = CONFIG['pep8.ignore'], reporter = StyleReport)
    style_checker.check_files(list(_python_files(paths)))

  if check_two_space_indents or check_newlines or check_trailing_whitespace or check_exception_keyword:
    for path in _python_files(paths):
      with open(path) as f:
        file_contents = f.read()

      lines = file_contents.split('\n')
      is_block_comment = False

      for index, line in enumerate(lines):
        # Raw string so '\s' isn't treated as a (deprecated) string escape.
        whitespace, content = re.match(r'^(\s*)(.*)$', line).groups()

        # TODO: This does not check that block indentations are two spaces
        # because differentiating source from string blocks ("""foo""") is more
        # of a pita than I want to deal with right now.

        # Toggle on each triple-quote so lines inside docstring blocks are
        # exempted from the single-quote preference below.
        if '"""' in content:
          is_block_comment = not is_block_comment

        if check_two_space_indents and '\t' in whitespace:
          issues.setdefault(path, []).append(Issue(index + 1, 'indentation has a tab', line))
        elif check_newlines and '\r' in content:
          issues.setdefault(path, []).append(Issue(index + 1, 'contains a windows newline', line))
        elif check_trailing_whitespace and content != content.rstrip():
          issues.setdefault(path, []).append(Issue(index + 1, 'line has trailing whitespace', line))
        elif check_exception_keyword and content.lstrip().startswith('except') and content.endswith(', exc:'):
          # Python 2.6 - 2.7 supports two forms for exceptions...
          #
          #   except ValueError, exc:
          #   except ValueError as exc:
          #
          # The former is the old method and no longer supported in python 3
          # going forward.

          # TODO: This check only works if the exception variable is called
          # 'exc'. We should generalize this via a regex so other names work
          # too.

          issues.setdefault(path, []).append(Issue(index + 1, "except clause should use 'as', not comma", line))

        if prefer_single_quotes and line and not is_block_comment:
          content = line.strip().split('#', 1)[0]

          if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'):
            # Checking if the line already has any single quotes since that
            # usually means double quotes are preferable for the content (for
            # instance "I'm hungry"). Also checking for '\' at the end since
            # that can indicate a multi-line string.

            issues.setdefault(path, []).append(Issue(index + 1, 'use single rather than double quotes', line))

  return issues
def pyflakes_issues(paths):
  """
  Performs static checks via pyflakes. False positives can be ignored via
  'pyflakes.ignore' entries in our 'test' config. For instance...

  ::

    pyflakes.ignore stem/util/test_tools.py => 'pyflakes' imported but unused
    pyflakes.ignore stem/util/test_tools.py => 'pep8' imported but unused

  If a 'exclude_paths' was set in our test config then we exclude any absolute
  paths matching those regexes.

  .. versionchanged:: 1.3.0
     Renamed from get_pyflakes_issues() to pyflakes_issues(). The old name
     still works as an alias, but will be dropped in Stem version 2.0.0.

  .. versionchanged:: 1.4.0
     Changing tuples in return value to be namedtuple instances, and adding the
     line that had the issue.

  :param list paths: paths to search for problems

  :returns: dict of the form ``path => [(line_number, message)...]``
  """

  issues = {}

  if is_pyflakes_available():
    import pyflakes.api
    import pyflakes.reporter

    class Reporter(pyflakes.reporter.Reporter):
      def __init__(self):
        # Does not call the parent constructor; the callbacks below never use
        # the stream attributes it would set, collecting into 'issues' instead.
        self._ignored_issues = {}

        for line in CONFIG['pyflakes.ignore']:
          path, issue = line.split('=>')
          self._ignored_issues.setdefault(path.strip(), []).append(issue.strip())

      def unexpectedError(self, filename, msg):
        self._register_issue(filename, None, msg, None)

      def syntaxError(self, filename, msg, lineno, offset, text):
        self._register_issue(filename, lineno, msg, text)

      def flake(self, msg):
        self._register_issue(msg.filename, msg.lineno, msg.message % msg.message_args, None)

      def _is_ignored(self, path, issue):
        # Paths in pyflakes_ignore are relative, so we need to check to see if our
        # path ends with any of them.

        for ignored_path, ignored_issues in self._ignored_issues.items():
          if path.endswith(ignored_path) and issue in ignored_issues:
            return True

        return False

      def _register_issue(self, path, line_number, issue, line):
        if not self._is_ignored(path, issue):
          # Look up the offending line for context when pyflakes didn't supply it.
          if path and line_number and not line:
            line = linecache.getline(path, line_number)

          issues.setdefault(path, []).append(Issue(line_number, issue, line))

    reporter = Reporter()

    for path in _python_files(paths):
      pyflakes.api.checkPath(path, reporter)

  return issues
def _python_files(paths):
  """
  Yields each *.py file beneath the given paths, skipping any whose path
  matches one of the configured 'exclude_paths' regexes.
  """

  for search_path in paths:
    for file_path in stem.util.system.files_with_suffix(search_path, '.py'):
      if not any(re.match(pattern, file_path) for pattern in CONFIG['exclude_paths']):
        yield file_path
# TODO: drop with stem 2.x
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
# names for backward compatibility.

get_stylistic_issues = stylistic_issues
get_pyflakes_issues = pyflakes_issues
| gpl-2.0 |
jakevdp/klsh | klsh/hamming_ann.py | 1 | 6489 | """
This is a set of classes to perform fast (approximate) nearest neighbors
searches over Hamming spaces.
[1] M. Charikar. Similarity Estimation Techniques from Rounding Algorithms.
ACM Symposium on Theory of Computing, 2002.
"""
__all__ = ["HammingANN", "HammingBrute", "HammingBallTree"]
import numpy as np
from scipy.spatial import distance
from sklearn.neighbors import BallTree
from .utils import create_rng, packbits_axis, unpackbits_axis, hamming_cdist
class HammingSearchBase(object):
    """Abstract interface for nearest-neighbor searches over Hamming space."""

    def fit(self, X):
        raise NotImplementedError('HammingSearchBase.fit')

    def query(self, X, k, return_dist=False):
        raise NotImplementedError('HammingSearchBase.query')

    @staticmethod
    def _validate_input(X, return_compact=True):
        # Coerce to a 2D uint8 array; nonzero entries are treated as set bits.
        X = np.atleast_2d(np.asarray(X, dtype=np.uint8))

        if X.ndim != 2:
            raise ValueError("Input hamming array must be two dimensions")

        if not return_compact:
            X[X != 0] = 1
            return X

        # Pack each row of bits into compact integer hashes.
        return packbits_axis(X)
class HammingBrute(HammingSearchBase):
    """Exact brute-force Hamming search over packed-bit or raw binary arrays."""

    def __init__(self, compact=False):
        # When compact=True, vectors are packed into integer hashes and
        # compared via hamming_cdist rather than scipy's fractional metric.
        self.compact = compact

    def fit(self, X):
        """Fit a set of hamming vectors

        Parameters
        ----------
        X : array_like
            an array of size (n_features, n_bits). Nonzero entries will be
            evaluated as 1, and zero entries as 0
        """
        if self.compact:
            self._fit_X = self._validate_input(X)
        else:
            self._fit_X = self._validate_input(X, False)
        return self

    def query(self, X, k, return_dist=False):
        if self.compact:
            X = self._validate_input(X)
            cdist = hamming_cdist(X, self._fit_X)
        else:
            X = self._validate_input(X, False)
            cdist = distance.cdist(X, self._fit_X, 'hamming')

        # Indices of the k smallest distances per query row.
        ind = np.argsort(cdist, 1)[:, :k]

        if not return_dist:
            return ind

        rows = np.arange(ind.shape[0])[:, np.newaxis]
        dist = cdist[rows, ind]

        if not self.compact:
            # scipy reports hamming distance as a fraction; convert to bit counts.
            dist = (dist * X.shape[1]).astype(int)

        return ind, dist
class HammingBallTree(HammingSearchBase):
    """Exact Hamming neighbor search backed by scikit-learn's BallTree."""

    def __init__(self, leaf_size=40, query_kwds=None):
        self.leaf_size = leaf_size
        # NOTE(review): query_kwds is stored but never applied in query()
        # below -- confirm whether it should be forwarded to BallTree.query.
        self.query_kwds = query_kwds or {}

    def fit(self, X):
        bits = self._validate_input(X, return_compact=False)
        self._tree = BallTree(bits, metric='hamming', leaf_size=self.leaf_size)
        return self

    def query(self, X, k, return_dist=False):
        bits = self._validate_input(X, return_compact=False)

        if not return_dist:
            return self._tree.query(bits, k, return_distance=False)

        dist, ind = self._tree.query(bits, k, return_distance=True)
        # BallTree reports hamming distance as a fraction of bits; convert to counts.
        return ind, (dist * bits.shape[1]).astype(int)
class HammingANN(HammingSearchBase):
    """Approximate nearest-neighbor search over Hamming space using random bit
    permutations, following Charikar's similarity-estimation scheme [1]."""
    def __init__(self, epsilon=0.5, random_state=None):
        # epsilon: approximation slack -- smaller values use more permutations.
        # random_state: seed or generator for reproducible permutations.
        self.epsilon = epsilon
        self.random_state = random_state
    def fit(self, X):
        """Fit a set of hamming vectors

        Parameters
        ----------
        X : array_like
            an array of size (n_features, n_bits). Nonzero entries will be
            evaluated as 1, and zero entries as 0
        """
        self._X_fit = self._validate_input(X, False)
        self._X_fit_compact = packbits_axis(self._X_fit)
        N, n_bits = self._X_fit.shape
        # choose number of permutations based on epsilon
        M = 2 * int(np.ceil(N ** (1. / (1. + self.epsilon))))
        rng = create_rng(self.random_state)
        # Each row of P_indices is one random permutation of the bit positions.
        P_indices = np.array([rng.choice(n_bits, n_bits, replace=False)
                              for i in range(M)])
        # P_compact will be of shape (M, X.shape[0]), and contains
        # M bit-permutations applied across all the keys
        P = self._X_fit[:, P_indices]
        P_compact = packbits_axis(P).T
        # Do a lexicographic sort of all the permuted bits.
        # Here's where cython would help immensely. We could store just
        # M permutation-bit arrays, and write a custom sort & binary search
        # which will work on these permutations and orderings.
        sort_indices = np.argsort(P_compact, 1)
        P_compact_sorted = P_compact[np.arange(M)[:, None], sort_indices]
        unsort_indices = np.argsort(sort_indices, 1)
        #----------------- just a sanity check (TODO: REMOVE THIS)
        reordered = P_compact_sorted[np.arange(M)[:, np.newaxis],
                                     unsort_indices]
        assert np.all(reordered == P_compact)
        #---------------------------------------------------------
        self._sort_indices = sort_indices
        self._unsort_indices = unsort_indices
        self._P_compact_sorted = P_compact_sorted
        return self
    def query(self, X, k, return_dist=False):
        """Query a set of distances

        Parameters
        ----------
        X : array_like
            an [n_samples, n_bits] array of hamming features. These will be
            interpreted as zeros and ones.
        """
        X_compact = self._validate_input(X)
        nbrs = np.zeros([X_compact.shape[0], k], dtype=int)
        if return_dist:
            dist = np.zeros_like(nbrs)
        M, N = self._P_compact_sorted.shape
        # TODO: MAKE THIS MORE EFFICIENT
        for i, val in enumerate(X_compact):
            # find ordered index within each random permutation
            # NOTE(review): val is searched unpermuted against the permuted
            # sorted hashes -- verify against the scheme in [1].
            P_indices = np.array([np.searchsorted(self._P_compact_sorted[j],
                                                  val) for j in range(M)])
            # get upper/lower indices within each permutation
            ind_uplo = np.clip(np.vstack([P_indices, P_indices + 1]), 0, N-1)
            # from indices within the sorted permutations, find the
            # unique set of indices from the original set of hashes
            ind_to_check = np.unique(self._sort_indices[range(M), ind_uplo])
            # compute hamming distances for these points, and put into results
            distances = hamming_cdist(val, self._X_fit_compact[ind_to_check])
            nearest = np.argsort(distances[0])[:k]
            nbrs[i, :len(nearest)] = ind_to_check[nearest]
            if return_dist:
                dist[i, :len(nearest)] = distances[0, nearest[:k]]
        if return_dist:
            return nbrs, dist
        else:
            return nbrs
| bsd-3-clause |
prakritish/ansible | test/runner/units/test_diff.py | 203 | 2975 | """Tests for diff module."""
import os
import subprocess
import pytest
from lib.diff import (
parse_diff,
FileDiff,
)
def get_diff(base, head=None):
    """Return a git diff between the base and head revision.

    Results are cached in /tmp keyed on the resolved revision pair, since the
    diff between two fixed commits never changes.

    :type base: str
    :type head: str | None
    :rtype: list[str]
    """
    if not head or head == 'HEAD':
        # Resolve the symbolic HEAD to a concrete hash so the cache key is
        # stable. check_output returns bytes on python 3; decode so the
        # cache path and the join below operate on text.
        head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()

    cache = '/tmp/git-diff-cache-%s-%s.log' % (base, head)

    if os.path.exists(cache):
        with open(cache, 'r') as cache_fd:
            lines = cache_fd.read().splitlines()
    else:
        lines = subprocess.check_output(['git', 'diff', base, head]).decode().splitlines()

        with open(cache, 'w') as cache_fd:
            cache_fd.write('\n'.join(lines))

    assert lines

    return lines
def get_parsed_diff(base, head=None):
    """Return a parsed git diff between the base and head revision.

    :type base: str
    :type head: str | None
    :rtype: list[FileDiff]
    """
    items = parse_diff(get_diff(base, head))

    assert items

    for item in items:
        # Each parsed entry must be structurally complete with formattable sides.
        assert item.headers
        assert item.is_complete

        item.old.format_lines()
        item.new.format_lines()

        # All line ranges must be well ordered and 1-based.
        for start, end in item.old.ranges:
            assert end >= start > 0

        for start, end in item.new.ranges:
            assert end >= start > 0

    return items
# (base, head) commit pairs from ansible/ansible history used to exercise the
# diff parser against real-world diffs.
RANGES_TO_TEST = (
    ('f31421576b00f0b167cdbe61217c31c21a41ac02', 'HEAD'),
    ('b8125ac1a61f2c7d1de821c78c884560071895f1', '32146acf4e43e6f95f54d9179bf01f0df9814217')
)
@pytest.mark.parametrize("base, head", RANGES_TO_TEST)
def test_parse_diff(base, head):
    """Integration test to verify parsing of ansible/ansible history."""
    get_parsed_diff(base, head)
def test_parse_delete():
    """Integration test to verify parsing of a deleted file."""
    commit = 'ee17b914554861470b382e9e80a8e934063e0860'
    items = get_parsed_diff(commit + '~', commit)

    # A deleted file is one whose new side no longer exists.
    deleted = [item for item in items if not item.new.exists]

    assert len(deleted) == 1
    deleted_file = deleted[0]
    assert deleted_file.old.path == 'lib/ansible/plugins/connection/nspawn.py'
    assert deleted_file.new.path == 'lib/ansible/plugins/connection/nspawn.py'
def test_parse_rename():
    """Integration test to verify parsing of renamed files."""
    commit = '16a39639f568f4dd5cb233df2d0631bdab3a05e9'
    items = get_parsed_diff('%s~' % commit, commit)
    # A rename is an entry that exists on both sides under different paths.
    renames = [entry for entry in items
               if entry.old.exists and entry.new.exists and entry.old.path != entry.new.path]
    assert len(renames) == 2
    expected = (
        ('test/integration/targets/eos_eapi/tests/cli/badtransport.yaml',
         'test/integration/targets/eos_eapi/tests/cli/badtransport.1'),
        ('test/integration/targets/eos_eapi/tests/cli/zzz_reset.yaml',
         'test/integration/targets/eos_eapi/tests/cli/zzz_reset.1'),
    )
    for rename, (old_path, new_path) in zip(renames, expected):
        assert rename.old.path == old_path
        assert rename.new.path == new_path
| gpl-3.0 |
pygeek/python-oauth2 | oauth2/clients/imap.py | 885 | 1685 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Authenticate against the server with an XOAUTH string built
        from *url*, *consumer* and *token* (either may be None)."""
        # Validate the credential objects before starting the exchange.
        if not (consumer is None or isinstance(consumer, oauth2.Consumer)):
            raise ValueError("Invalid consumer.")
        if not (token is None or isinstance(token, oauth2.Token)):
            raise ValueError("Invalid token.")

        def xoauth_response(challenge):
            # The server challenge is ignored; XOAUTH sends a fixed string.
            return oauth2.build_xoauth_string(url, consumer, token)

        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH', xoauth_response)
| mit |
nicoTrombon/DjangoPolls | env/Lib/site-packages/django/contrib/gis/db/models/lookups.py | 84 | 10596 | from __future__ import unicode_literals
import re
from django.core.exceptions import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Expression
from django.db.models.lookups import Lookup
from django.utils import six
# Registry mapping lookup names to their GISLookup subclasses; populated
# below as each lookup class is defined.
gis_lookups = {}
class GISLookup(Lookup):
    """
    Base class for GeoDjango spatial lookups. Subclasses set
    ``lookup_name`` and register themselves in the module-level
    ``gis_lookups`` registry; SQL generation is delegated to the
    backend-specific operator in ``as_sql``.
    """
    sql_template = None  # Optional SQL template override used by some subclasses.
    transform_func = None
    distance = False  # True for distance-comparison lookups (see DistanceLookupBase).
    @classmethod
    def _check_geo_field(cls, opts, lookup):
        """
        Utility for checking the given lookup with the given model options.
        The lookup is a string either specifying the geographic field, e.g.
        'point, 'the_geom', or a related lookup on a geographic field like
        'address__point'.
        If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise returns None.
        """
        from django.contrib.gis.db.models.fields import GeometryField
        # This takes into account the situation where the lookup is a
        # lookup to a related geographic field, e.g., 'address__point'.
        field_list = lookup.split(LOOKUP_SEP)
        # Reversing so list operates like a queue of related lookups,
        # and popping the top lookup.
        field_list.reverse()
        fld_name = field_list.pop()
        try:
            geo_fld = opts.get_field(fld_name)
            # If the field list is still around, then it means that the
            # lookup was for a geometry field across a relationship --
            # thus we keep on getting the related model options and the
            # model field associated with the next field in the list
            # until there's no more left.
            while len(field_list):
                opts = geo_fld.rel.to._meta
                geo_fld = opts.get_field(field_list.pop())
        except (FieldDoesNotExist, AttributeError):
            return False
        # Finally, make sure we got a Geographic field and return.
        if isinstance(geo_fld, GeometryField):
            return geo_fld
        else:
            return False
    def get_db_prep_lookup(self, value, connection):
        # get_db_prep_lookup is called by process_rhs from super class
        if isinstance(value, (tuple, list)):
            # First param is assumed to be the geometric object
            params = [connection.ops.Adapter(value[0])] + list(value)[1:]
        else:
            params = [connection.ops.Adapter(value)]
        return ('%s', params)
    def process_rhs(self, compiler, connection):
        """Compile the right-hand side, wrapping it in the backend's
        geometry placeholder unless it is a subquery."""
        rhs, rhs_params = super(GISLookup, self).process_rhs(compiler, connection)
        if hasattr(self.rhs, '_as_sql'):
            # If rhs is some QuerySet, don't touch it
            return rhs, rhs_params
        geom = self.rhs
        if isinstance(self.rhs, Col):
            # Make sure the F Expression destination field exists, and
            # set an `srid` attribute with the same as that of the
            # destination.
            geo_fld = self.rhs.output_field
            if not hasattr(geo_fld, 'srid'):
                raise ValueError('No geographic field found in expression.')
            self.rhs.srid = geo_fld.srid
        elif isinstance(self.rhs, Expression):
            raise ValueError('Complex expressions not supported for GeometryField')
        elif isinstance(self.rhs, (list, tuple)):
            # For tuple values, only the leading geometry gets the placeholder;
            # remaining elements (e.g. distances) stay as plain parameters.
            geom = self.rhs[0]
        rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
        return rhs, rhs_params
    def as_sql(self, compiler, connection):
        # Delegate the final SQL construction to the backend operator
        # registered under this lookup's name.
        lhs_sql, sql_params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        sql_params.extend(rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql}
        backend_op = connection.ops.gis_operators[self.lookup_name]
        return backend_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
# Each class below only names a backend bounding-box operator; SQL
# generation is inherited unchanged from GISLookup.
class OverlapsLeftLookup(GISLookup):
    """
    The overlaps_left operator returns true if A's bounding box overlaps or is to the
    left of B's bounding box.
    """
    lookup_name = 'overlaps_left'
gis_lookups['overlaps_left'] = OverlapsLeftLookup
class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
    right of B's bounding box.
    """
    lookup_name = 'overlaps_right'
gis_lookups['overlaps_right'] = OverlapsRightLookup
class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
    B's bounding box.
    """
    lookup_name = 'overlaps_below'
gis_lookups['overlaps_below'] = OverlapsBelowLookup
class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
    B's bounding box.
    """
    lookup_name = 'overlaps_above'
gis_lookups['overlaps_above'] = OverlapsAboveLookup
class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the left
    of B's bounding box.
    """
    lookup_name = 'left'
gis_lookups['left'] = LeftLookup
class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the right
    of B's bounding box.
    """
    lookup_name = 'right'
gis_lookups['right'] = RightLookup
class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly below B's
    bounding box.
    """
    lookup_name = 'strictly_below'
gis_lookups['strictly_below'] = StrictlyBelowLookup
class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly above B's
    bounding box.
    """
    lookup_name = 'strictly_above'
gis_lookups['strictly_above'] = StrictlyAboveLookup
class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'
gis_lookups['same_as'] = SameAsLookup
class ExactLookup(SameAsLookup):
    # Alias of same_as
    lookup_name = 'exact'
gis_lookups['exact'] = ExactLookup
class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely
    contains B's bounding box.
    """
    lookup_name = 'bbcontains'
gis_lookups['bbcontains'] = BBContainsLookup
class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
    """
    lookup_name = 'bboverlaps'
gis_lookups['bboverlaps'] = BBOverlapsLookup
class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely contained
    by B's bounding box.
    """
    lookup_name = 'contained'
gis_lookups['contained'] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
# These lookups map directly to backend spatial-relation functions; each
# class contributes only its lookup name.
class ContainsLookup(GISLookup):
    lookup_name = 'contains'
gis_lookups['contains'] = ContainsLookup
class ContainsProperlyLookup(GISLookup):
    lookup_name = 'contains_properly'
gis_lookups['contains_properly'] = ContainsProperlyLookup
class CoveredByLookup(GISLookup):
    lookup_name = 'coveredby'
gis_lookups['coveredby'] = CoveredByLookup
class CoversLookup(GISLookup):
    lookup_name = 'covers'
gis_lookups['covers'] = CoversLookup
class CrossesLookup(GISLookup):
    lookup_name = 'crosses'
gis_lookups['crosses'] = CrossesLookup
class DisjointLookup(GISLookup):
    lookup_name = 'disjoint'
gis_lookups['disjoint'] = DisjointLookup
class EqualsLookup(GISLookup):
    lookup_name = 'equals'
gis_lookups['equals'] = EqualsLookup
class IntersectsLookup(GISLookup):
    lookup_name = 'intersects'
gis_lookups['intersects'] = IntersectsLookup
class OverlapsLookup(GISLookup):
    lookup_name = 'overlaps'
gis_lookups['overlaps'] = OverlapsLookup
class RelateLookup(GISLookup):
    """Spatial 'relate' lookup taking a (geometry, pattern) two-tuple."""
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    # Intersection matrix: exactly nine characters from {0, 1, 2, T, F, *}.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')
    def get_db_prep_lookup(self, value, connection):
        if len(value) != 2:
            raise ValueError('relate must be passed a two-tuple')
        # Check the pattern argument
        backend_op = connection.ops.gis_operators[self.lookup_name]
        if hasattr(backend_op, 'check_relate_argument'):
            # The backend supplies its own pattern validation; defer to it.
            backend_op.check_relate_argument(value[1])
        else:
            pattern = value[1]
            if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
                raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups['relate'] = RelateLookup
# Remaining simple relation lookups; name-only like the block above.
class TouchesLookup(GISLookup):
    lookup_name = 'touches'
gis_lookups['touches'] = TouchesLookup
class WithinLookup(GISLookup):
    lookup_name = 'within'
gis_lookups['within'] = WithinLookup
class DistanceLookupBase(GISLookup):
    """Base class for lookups that compare against a distance value."""
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'
    def get_db_prep_lookup(self, value, connection):
        # value is expected as (geometry, distance[, extra]); the tail is
        # handed to the backend's get_distance — presumably unit/spheroid
        # options; confirm against the backend ops implementation.
        if isinstance(value, (tuple, list)):
            if not 2 <= len(value) <= 3:
                raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
            params = [connection.ops.Adapter(value[0])]
            # Getting the distance parameter in the units of the field.
            params += connection.ops.get_distance(self.lhs.output_field, value[1:], self.lookup_name)
            return ('%s', params)
        else:
            return super(DistanceLookupBase, self).get_db_prep_lookup(value, connection)
# Concrete distance lookups; DWithin overrides the SQL template because the
# distance is a function argument rather than a comparison operand.
class DWithinLookup(DistanceLookupBase):
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
gis_lookups['dwithin'] = DWithinLookup
class DistanceGTLookup(DistanceLookupBase):
    lookup_name = 'distance_gt'
gis_lookups['distance_gt'] = DistanceGTLookup
class DistanceGTELookup(DistanceLookupBase):
    lookup_name = 'distance_gte'
gis_lookups['distance_gte'] = DistanceGTELookup
class DistanceLTLookup(DistanceLookupBase):
    lookup_name = 'distance_lt'
gis_lookups['distance_lt'] = DistanceLTLookup
class DistanceLTELookup(DistanceLookupBase):
    lookup_name = 'distance_lte'
gis_lookups['distance_lte'] = DistanceLTELookup
| bsd-3-clause |
brandicted/nefertari | tests/test_view.py | 2 | 26327 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pytest
from mock import Mock, MagicMock, patch, call, PropertyMock
from nefertari.view import (
BaseView, error_view, key_error_view, value_error_view)
from nefertari.utils import dictset
from nefertari.json_httpexceptions import (
JHTTPBadRequest, JHTTPNotFound, JHTTPMethodNotAllowed)
from nefertari.wrappers import wrap_me, ValidationError, ResourceNotFound
from nefertari.renderers import _JSONEncoder
class DummyBaseView(BaseView):
    """Minimal concrete BaseView used as the system under test."""
    _json_encoder = _JSONEncoder
class TestViewMapper(object):
    """Tests for nefertari.view.ViewMapper request dispatch wiring."""
    @patch('nefertari.view.trigger_before_events')
    def test_trigger_before_events_called(self, mock_trigger):
        from nefertari.view import ViewMapper
        class MyView(object):
            Model = Mock
            def __init__(self, ctx, req):
                self._before_calls = {}
                self._after_calls = {}
                self._json_params = {}
                self.context = 'foo'
            def index(self):
                return ['thing']
        request = MagicMock(
            json={'username': 'admin'},
            body='{"username":"admin"}')
        resource = MagicMock(actions=['index'])
        wrapper = ViewMapper(**{'attr': 'index'})(MyView)
        wrapper(resource, request)
        assert mock_trigger.called
    def test_viewmapper(self):
        # Only the 'index' before-call should run; 'show' callbacks are
        # stashed on request.filters for later.
        from nefertari.view import ViewMapper
        bc1 = Mock()
        bc3 = Mock()
        bc2 = Mock()
        class MyView(object):
            Model = Mock
            _response = None
            def __init__(self, ctx, req):
                self._before_calls = {'index': [bc1], 'show': [bc3]}
                self._after_calls = {'show': [bc2]}
                self._json_params = {}
                self.context = 'foo'
                self.request = Mock(action='index')
            @wrap_me(before=bc2)
            def index(self):
                return ['thing']
        request = MagicMock()
        resource = MagicMock(actions=['index'])
        wrapper = ViewMapper(**{'attr': 'index'})(MyView)
        result = wrapper(resource, request)
        assert request.filters == {'show': [bc2]}
        assert request.action == 'index'
        assert result == ['thing']
        bc1.assert_called_with(request=request)
        assert not bc2.called
        assert not bc3.called
    def test_viewmapper_bad_request(self):
        # A ValidationError raised by a before-call maps to HTTP 400.
        from nefertari.view import ViewMapper
        bc1 = Mock(side_effect=ValidationError)
        class MyView(object):
            Model = Mock
            def __init__(self, ctx, req):
                self._before_calls = {'index': [bc1]}
                self._after_calls = {}
            def index(self):
                return ['thing']
        request = Mock()
        resource = Mock(actions=['index'])
        wrapper = ViewMapper(**{'attr': 'index'})(MyView)
        with pytest.raises(JHTTPBadRequest):
            wrapper(resource, request)
    def test_viewmapper_not_found(self):
        # A ResourceNotFound raised by a before-call maps to HTTP 404.
        from nefertari.view import ViewMapper
        bc1 = Mock(side_effect=ResourceNotFound)
        class MyView(object):
            Model = 'foo'
            def __init__(self, ctx, req):
                self._before_calls = {'index': [bc1]}
                self._after_calls = {}
                self._json_params = {}
                self.context = 'foo'
                self.request = Mock(action='index')
            def index(self):
                return ['thing']
        request = Mock()
        resource = Mock(actions=['index'])
        wrapper = ViewMapper(**{'attr': 'index'})(MyView)
        with pytest.raises(JHTTPNotFound):
            wrapper(resource, request)
class TestBaseView(object):
    """Tests for BaseView initialization, wrappers and helper methods."""
    def get_common_mock_request(self):
        # Shared anonymous-request fixture used across most tests.
        return Mock(content_type='', method='', accept=[''], user=None)
    def test_baseview(self, *a):
        class UsersView(BaseView):
            _json_encoder = _JSONEncoder
            def __init__(self, context, request):
                BaseView.__init__(self, context, request)
                self._json_params = {}
                self.context = 'foo'
                self.request = Mock(action='index')
            def show(self, id):
                return 'John Doe'
            def convert_ids2objects(self, *args, **kwargs):
                pass
        request = MagicMock(content_type='')
        request.matched_route.pattern = '/users'
        view = UsersView(request.context, request)
        assert 'John Doe' == view.show(1)
        with pytest.raises(JHTTPMethodNotAllowed):
            view.index()
        with pytest.raises(AttributeError):
            view.frobnicate()
        # delete is an allowed action, but it raises since BaseView
        # does not implement it.
        with pytest.raises(JHTTPMethodNotAllowed):
            view.delete()
    def test_convert_dotted(self):
        converted = BaseView.convert_dotted({
            'settings.foo': 'bar',
            'option': 'value',
            'one.two.three.four': 4,
            'one.two.six': 6,
        })
        assert sorted(converted.keys()) == sorted([
            'settings', 'option', 'one'])
        assert converted['settings'] == {'foo': 'bar'}
        assert converted['option'] == 'value'
        assert converted['one'] == {
            'two': {
                'three': {'four': 4},
                'six': 6,
            },
        }
        assert 'settings.foo' not in converted
    def test_convert_dotted_no_dotted(self):
        converted = BaseView.convert_dotted({
            'option': 'value'
        })
        assert converted == {'option': 'value'}
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_init(self, run):
        request = Mock(
            content_type='application/json',
            json={'param1.foo': 'val1', 'param3': 'val3'},
            method='POST',
            accept=[''],
        )
        request.params.mixed.return_value = {'param2.foo': 'val2'}
        view = DummyBaseView(context={'foo': 'bar'}, request=request)
        run.assert_called_once_with()
        assert request.override_renderer == 'nefertari_json'
        assert list(sorted(view._params.keys())) == [
            'param1', 'param2', 'param3']
        assert view._params['param1'] == {'foo': 'val1'}
        assert view._params['param2'] == {'foo': 'val2'}
        assert view._params['param3'] == 'val3'
        assert view.request == request
        assert view.context == {'foo': 'bar'}
        assert view._before_calls == {}
        assert view._after_calls == {}
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_init_json_accept_header(self, run):
        request = Mock(
            content_type='application/json',
            json={'param1.foo': 'val1', 'param3': 'val3'},
            method='POST',
            accept=['application/json'],
        )
        request.params.mixed.return_value = {'param2.foo': 'val2'}
        DummyBaseView(context={'foo': 'bar'}, request=request)
        assert request.override_renderer == 'nefertari_json'
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_init_text_ct_and_accept(self, run):
        # Non-JSON content type: body is ignored, only query params parsed.
        request = Mock(
            content_type='text/plain',
            json={'param1.foo': 'val1', 'param3': 'val3'},
            method='POST',
            accept=['text/plain'],
        )
        request.params.mixed.return_value = {'param2.foo': 'val2'}
        view = DummyBaseView(context={'foo': 'bar'}, request=request)
        assert request.override_renderer == 'string'
        assert list(view._params.keys()) == ['param2']
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_init_json_error(self, run):
        # An unparseable JSON body must not break view construction.
        import simplejson
        request = Mock(
            content_type='application/json',
            method='POST',
            accept=['application/json'],
        )
        type(request).json = PropertyMock(
            side_effect=simplejson.JSONDecodeError(
                'foo', 'asdasdasdasd', pos=1))
        request.params.mixed.return_value = {'param2.foo': 'val2'}
        view = DummyBaseView(context={'foo': 'bar'}, request=request)
        assert request.override_renderer == 'nefertari_json'
        assert list(view._params.keys()) == ['param2']
    @patch('nefertari.view.BaseView.setup_default_wrappers')
    @patch('nefertari.view.BaseView.convert_ids2objects')
    @patch('nefertari.view.BaseView.set_public_limits')
    def test_run_init_actions(self, limit, conv, setpub):
        request = Mock(
            content_type='text/plain',
            json={'param1.foo': 'val1', 'param3': 'val3'},
            method='POST',
            accept=['text/plain'],
        )
        request.params.mixed.return_value = {'param2.foo': 'val2'}
        DummyBaseView(context={'foo': 'bar'}, request=request)
        limit.assert_called_once_with()
        conv.assert_called_once_with()
        setpub.assert_called_once_with()
    @patch('nefertari.elasticsearch.ES')
    @patch('nefertari.view.ESAggregator')
    def test_setup_aggregation_es_disabled(self, aggregator, mock_es):
        mock_es.settings = dictset(enable_aggregations=False)
        request = Mock(content_type='', method='', accept=[''])
        view = DummyBaseView(context={}, request=request,
                             _query_params={'foo': 'bar'})
        view.index = 1
        view._setup_aggregation()
        assert view.index == 1
    @patch('nefertari.elasticsearch.ES')
    @patch('nefertari.view.ESAggregator')
    def test_setup_aggregation_index_not_defined(self, aggregator, mock_es):
        mock_es.settings = dictset(enable_aggregations=True)
        request = Mock(content_type='', method='', accept=[''])
        view = DummyBaseView(context={}, request=request,
                             _query_params={'foo': 'bar'})
        assert view.index == view.not_allowed_action
        view._setup_aggregation()
        with pytest.raises(JHTTPMethodNotAllowed):
            view.index()
    @patch('nefertari.elasticsearch.ES')
    @patch('nefertari.view.ESAggregator')
    def test_setup_aggregation(self, aggregator, mock_es):
        mock_es.settings = dictset(enable_aggregations=True)
        request = Mock(content_type='', method='', accept=[''])
        view = DummyBaseView(context={}, request=request,
                             _query_params={'foo': 'bar'})
        type(view).index = 1
        view._setup_aggregation()
        aggregator.assert_called_once_with(view)
        aggregator().wrap.assert_called_once_with(1)
        assert view.index == aggregator().wrap()
    @patch('nefertari.elasticsearch.ES')
    def test_get_collection_es(self, mock_es):
        request = Mock(content_type='', method='', accept=[''])
        view = DummyBaseView(
            context={}, request=request,
            _query_params={'foo': 'bar'})
        view.Model = Mock(__name__='MyModel')
        view._query_params['q'] = 'movies'
        result = view.get_collection_es()
        mock_es.assert_called_once_with('MyModel')
        mock_es().get_collection.assert_called_once_with(
            foo='bar', q='movies')
        assert result == mock_es().get_collection()
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_fill_null_values(self, run):
        request = Mock(content_type='', method='', accept=[''])
        view = DummyBaseView(
            context={}, request=request,
            _query_params={'foo': 'bar'})
        view.Model = Mock()
        view.Model.get_null_values.return_value = {
            'name': None, 'email': 1, 'foo': None}
        view._json_params = {'foo': 'bar'}
        view.fill_null_values()
        # Existing params win over model null defaults.
        assert view._json_params == {
            'foo': 'bar', 'name': None, 'email': 1
        }
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_init_no_root(self, run):
        request = Mock(content_type='', method='', accept=[''])
        kwargs = dict(
            context={}, request=request, _query_params={'foo': 'bar'})
        view = DummyBaseView(**kwargs)
        view.root_resource = None
        view.__init__(**kwargs)
        assert not view._auth_enabled
    @patch('nefertari.view.wrappers')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_set_public_limits_no_auth(self, run, wrap):
        request = Mock(content_type='', method='', accept=[''])
        kwargs = dict(
            context={}, request=request, _query_params={'foo': 'bar'})
        view = DummyBaseView(**kwargs)
        view._auth_enabled = False
        view.set_public_limits()
        assert not wrap.set_public_limits.called
    @patch('nefertari.view.wrappers')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_set_public_limits_user_authenticated(self, run, wrap):
        request = Mock(content_type='', method='', accept=[''], user='foo')
        kwargs = dict(
            context={}, request=request, _query_params={'foo': 'bar'})
        view = DummyBaseView(**kwargs)
        view._auth_enabled = True
        view.set_public_limits()
        assert not wrap.set_public_limits.called
    @patch('nefertari.view.wrappers')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_set_public_limits_applied(self, run, wrap):
        request = self.get_common_mock_request()
        kwargs = dict(
            context={}, request=request, _query_params={'foo': 'bar'})
        view = DummyBaseView(**kwargs)
        view._auth_enabled = True
        view.set_public_limits()
        wrap.set_public_limits.assert_called_once_with(view)
    @patch('nefertari.view.engine')
    @patch('nefertari.view.BaseView.id2obj')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_convert_ids2objects_non_relational(self, run, id2obj, eng):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo1': 'bar'},
            _json_params={'foo': 'bar'})
        view.Model = 'Model1'
        eng.is_relationship_field.return_value = False
        view.convert_ids2objects()
        eng.is_relationship_field.assert_called_once_with('foo', 'Model1')
        assert not id2obj.called
    @patch('nefertari.view.engine')
    @patch('nefertari.view.BaseView.id2obj')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_convert_ids2objects_relational(self, run, id2obj, eng):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo1': 'bar'},
            _json_params={'foo': 'bar'})
        view.Model = 'Model1'
        eng.is_relationship_field.return_value = True
        view.convert_ids2objects()
        eng.get_relationship_cls.assert_called_once_with('foo', 'Model1')
        id2obj.assert_called_once_with('foo', eng.get_relationship_cls())
    @patch('nefertari.view.wrappers')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_setup_default_wrappers_with_auth(self, run, wrap):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        view._auth_enabled = True
        view.setup_default_wrappers()
        assert len(view._after_calls['index']) == 4
        assert len(view._after_calls['show']) == 4
        assert len(view._after_calls['create']) == 4
        assert len(view._after_calls['update']) == 4
        assert len(view._after_calls['replace']) == 4
        assert wrap.apply_privacy.call_count == 5
    @patch('nefertari.view.wrappers')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_setup_default_wrappers_no_auth(self, run, wrap):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        view._auth_enabled = False
        view.setup_default_wrappers()
        assert len(view._after_calls['index']) == 3
        assert len(view._after_calls['show']) == 3
        assert not wrap.apply_privacy.called
    def test_defalt_wrappers_and_wrap_me(self):
        # NOTE(review): "defalt" typo is in the original test name; renaming
        # would change the test identifier, so it is left as-is.
        from nefertari import wrappers
        self.maxDiff = None
        def before_call(*a):
            return a[2]
        def after_call(*a):
            return a[2]
        class MyView(BaseView):
            _json_encoder = _JSONEncoder
            @wrappers.wrap_me(before=before_call, after=after_call)
            def index(self):
                return [1, 2, 3]
            def convert_ids2objects(self, *args, **kwargs):
                pass
        request = MagicMock(content_type='')
        resource = MagicMock(actions=['index'])
        view = MyView(resource, request)
        assert len(view._after_calls['index']) == 3
        assert len(view._after_calls['show']) == 3
        assert view.index._before_calls == [before_call]
        assert view.index._after_calls == [after_call]
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_not_allowed_action(self, run):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        with pytest.raises(JHTTPMethodNotAllowed):
            view.not_allowed_action()
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_add_before_or_after_before(self, run):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        callable_ = lambda x: x
        view.add_before_or_after_call(
            action='foo', _callable=callable_, pos=None, before=True)
        assert callable_ in view._before_calls['foo']
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_add_before_or_after_after(self, run):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        callable_ = lambda x: x
        view.add_before_or_after_call(
            action='foo', _callable=callable_, pos=None, before=False)
        assert callable_ in view._after_calls['foo']
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_add_before_or_after_position(self, run):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        callable1 = lambda x: x
        callable2 = lambda x: x + x
        view.add_before_or_after_call(
            action='foo', _callable=callable1, pos=None,
            before=False)
        assert callable1 is view._after_calls['foo'][0]
        view.add_before_or_after_call(
            action='foo', _callable=callable2, pos=0,
            before=False)
        assert callable2 is view._after_calls['foo'][0]
        assert callable1 is view._after_calls['foo'][1]
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_add_before_or_after_not_callable(self, run):
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        with pytest.raises(ValueError) as ex:
            view.add_before_or_after_call(
                action='foo', _callable='asdasd', pos=None,
                before=False)
        assert str(ex.value) == 'asdasd is not a callable'
    @patch('nefertari.view.urllib')
    @patch('nefertari.view.Request')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_subrequest_get(self, run, req, ulib):
        request = Mock(
            content_type='', method='', accept=[''], user=None,
            cookies=['1'])
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        view.subrequest(url='http://', params={'par': 'val'}, method='GET')
        req.blank.assert_called_once_with(
            'http://', cookies=['1'], content_type='application/json',
            method='GET')
        view.request.invoke_subrequest.assert_called_once_with(req.blank())
        ulib.parse.urlencode.assert_called_once_with({'par': 'val'})
    @patch('nefertari.view.json')
    @patch('nefertari.view.Request')
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_subrequest_post(self, run, req, json):
        request = Mock(
            content_type='', method='', accept=[''], user=None,
            cookies=['1'])
        view = DummyBaseView(
            context={}, request=request, _query_params={'foo': 'bar'})
        view.subrequest(url='http://', params={'par': 'val'}, method='POST')
        req.blank.assert_called_once_with(
            'http://', cookies=['1'], content_type='application/json',
            method='POST')
        view.request.invoke_subrequest.assert_called_once_with(req.blank())
        json.dumps.assert_called_once_with({'par': 'val'})
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj(self, run):
        model = Mock()
        model.pk_field.return_value = 'idname'
        model.get_item.return_value = 'foo'
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view._json_params['user'] = '1'
        view.id2obj(name='user', model=model)
        assert view._json_params['user'] == 'foo'
        model.pk_field.assert_called_once_with()
        model.get_item.assert_called_once_with(
            idname='1', _raise_on_empty=False)
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj_list(self, run):
        model = Mock()
        model.pk_field.return_value = 'idname'
        model.get_item.return_value = 'foo'
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view._json_params['user'] = ['1']
        view.id2obj(name='user', model=model)
        assert view._json_params['user'] == ['foo']
        model.pk_field.assert_called_once_with()
        model.get_item.assert_called_once_with(
            idname='1', _raise_on_empty=False)
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj_not_in_params(self, run):
        model = Mock()
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view.id2obj(name='asdasdasd', model=model)
        assert not model.pk_field.called
        assert not model.get_item.called
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj_setdefault(self, run):
        model = Mock()
        model.pk_field.return_value = 'idname'
        model.get_item.return_value = None
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view._json_params['user'] = '1'
        view.id2obj(name='user', model=model, setdefault=123)
        assert view._json_params['user'] == 123
        model.pk_field.assert_called_once_with()
        model.get_item.assert_called_once_with(
            idname='1', _raise_on_empty=False)
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj_value_none(self, run):
        # None entries are preserved, not converted.
        model = Mock()
        model.pk_field.return_value = 'idname'
        model.get_item.return_value = 'foo'
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view._json_params['users'] = [None, '1']
        view._json_params['story'] = None
        view.id2obj(name='users', model=model)
        view.id2obj(name='story', model=model)
        assert view._json_params['users'] == [None, 'foo']
        assert view._json_params['story'] is None
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj_already_object(self, run):
        id_ = Mock()
        model = Mock()
        model.pk_field.return_value = 'idname'
        model.get_item.return_value = None
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view._json_params['user'] = id_
        view.id2obj(name='user', model=model, setdefault=123)
        assert view._json_params['user'] == id_
        model.pk_field.assert_called_once_with()
        assert not model.get_item.called
    @patch('nefertari.view.BaseView._run_init_actions')
    def test_id2obj_not_found(self, run):
        model = Mock()
        model.pk_field.return_value = 'idname'
        model.get_item.return_value = None
        request = self.get_common_mock_request()
        view = DummyBaseView(
            context={}, request=request, _json_params={'foo': 'bar'},
            _query_params={'foo1': 'bar1'})
        view._json_params['user'] = '1'
        with pytest.raises(JHTTPBadRequest) as ex:
            view.id2obj(name='user', model=model)
        assert str(ex.value) == 'id2obj: Object 1 not found'
class TestViewHelpers(object):
    """Tests for the module-level error views and ``includeme``."""

    def test_key_error_view(self):
        response = key_error_view(Mock(args=('foo',)), None)
        assert str(response.message) == "Bad or missing param 'foo'"

    def test_value_error_view(self):
        response = value_error_view(Mock(args=('foo',)), None)
        assert str(response.message) == "Bad or missing value 'foo'"

    def test_error_view(self):
        response = error_view(Mock(args=('foo',)), None)
        assert str(response.message) == "foo"

    def test_includeme(self):
        from nefertari.view import includeme
        config = Mock()
        includeme(config)
        # Each builtin exception type must be wired to its view.
        expected_calls = [
            call(key_error_view, context=KeyError),
            call(value_error_view, context=ValueError),
            call(error_view, context=Exception),
        ]
        config.add_view.assert_has_calls(expected_calls, any_order=True)
| apache-2.0 |
dcmichael/stardict-3 | tools/src/stardict_images.py | 41 | 7885 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from gimpfu import *
import os
def prepare_image(image, visibleLayers, size, numColors=None):
    """Flatten *image* down to the requested layers and scale it.

    image - image object to change (modified in place)
    visibleLayers - names of the layers that must remain visible;
        every other layer is removed from the image
    size - target width and height in pixels (result is square)
    numColors - when given, convert the image to an indexed palette
        with that many colors
    """
    # NOTE(review): assumes image.layers yields a fresh snapshot list so
    # that removing layers while iterating is safe -- TODO confirm with
    # the GIMP Python-Fu bindings.
    for layer in image.layers:
        if layer.name in visibleLayers:
            layer.visible = True
        else:
            image.remove_layer(layer)
    gimp.pdb.gimp_image_merge_visible_layers(image, CLIP_TO_IMAGE)
    drawable = gimp.pdb.gimp_image_get_active_layer(image)
    # Scale the merged layer instead of the whole image: for a 670x670
    # source and size=32, gimp_image_scale_full left the image at 32x32
    # but the layer at only 27x31, so the layer is scaled directly.
    gimp.pdb.gimp_layer_scale_full(drawable, size, size, False, INTERPOLATION_CUBIC)
    if numColors is not None:
        gimp.pdb.gimp_image_convert_indexed(image, NO_DITHER, MAKE_PALETTE, numColors, False, False, "")
def save_image(image, dstFilePath):
    """Save *image* to *dstFilePath*, creating the directory if needed,
    then release the GIMP image and its active layer."""
    target_dir = os.path.dirname(dstFilePath)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    active_layer = gimp.pdb.gimp_image_get_active_layer(image)
    gimp.pdb.gimp_file_save(image, active_layer, dstFilePath, dstFilePath)
    # Free the GIMP-side objects; the caller must not reuse them.
    gimp.delete(active_layer)
    gimp.delete(image)
def create_icon(origImage, visibleLayers, props):
    """Build a multi-layer icon image from *origImage*.

    visibleLayers - a list of layer names that must be visible
    props - tuple of image properties in format ((size, bpp), ...)
        where:
        size - size of the icon variant in pixels,
        bpp - number of palette colors, None to leave true-color
    return value - new image whose layers are named 's0', 's1', ...
        (one layer per entry of props)
    """
    iconImage = None
    # enumerate replaces the original hand-maintained counter.
    for i, (size, bpp) in enumerate(props):
        image = gimp.pdb.gimp_image_duplicate(origImage)
        prepare_image(image, visibleLayers, size, bpp)
        image.layers[0].name = 's{0}'.format(i)
        if iconImage is None:
            # First variant becomes the base image.
            iconImage = image
        else:
            # Copy the prepared layer into the base image, then drop the
            # temporary image.
            newLayer = gimp.pdb.gimp_layer_new_from_drawable(image.layers[0], iconImage)
            gimp.pdb.gimp_image_add_layer(iconImage, newLayer, -1)
            gimp.delete(image)
    return iconImage
def stardict_images(srcFilePath, rootDir):
if not rootDir:
# srcFilePath = rootDir + "/pixmaps/stardict.xcf"
if not srcFilePath.endswith("/pixmaps/stardict.xcf"):
print 'Unable to automatically detect StarDict root directory. Specify non-blank root directory parameter.'
return
dstDirPath = os.path.dirname(srcFilePath)
dstDirPath = os.path.dirname(dstDirPath)
else:
dstDirPath = rootDir
"""
print 'srcFilePath = {0}'.format(srcFilePath)
print 'rootDir = {0}'.format(rootDir)
print 'dstDirPath = {0}'.format(dstDirPath)
"""
dstStarDict_s128_FilePath=os.path.join(dstDirPath, "pixmaps/stardict_128.png")
dstStarDict_s32_FilePath=os.path.join(dstDirPath, "pixmaps/stardict_32.png")
dstStarDict_s16_FilePath=os.path.join(dstDirPath, "pixmaps/stardict_16.png")
dstStarDict_FilePath=os.path.join(dstDirPath, "pixmaps/stardict.png")
dstStarDictEditor_s128_FilePath=os.path.join(dstDirPath, "pixmaps/stardict-editor_128.png")
dstStarDictEditor_s32_FilePath=os.path.join(dstDirPath, "pixmaps/stardict-editor_32.png")
dstStarDictEditor_s16_FilePath=os.path.join(dstDirPath, "pixmaps/stardict-editor_16.png")
dstStarDictIconFilePath=os.path.join(dstDirPath, "pixmaps/stardict.ico")
dstStarDictEditorIconFilePath=os.path.join(dstDirPath, "pixmaps/stardict-editor.ico")
dstStarDictUninstIconFilePath=os.path.join(dstDirPath, "pixmaps/stardict-uninst.ico")
dstDockletNormalFilePath=os.path.join(dstDirPath, "src/pixmaps/docklet_normal.png")
dstDockletScanFilePath=os.path.join(dstDirPath, "src/pixmaps/docklet_scan.png")
dstDockletStopFilePath=os.path.join(dstDirPath, "src/pixmaps/docklet_stop.png")
dstDockletGPENormalFilePath=os.path.join(dstDirPath, "src/pixmaps/docklet_gpe_normal.png")
dstDockletGPEScanFilePath=os.path.join(dstDirPath, "src/pixmaps/docklet_gpe_scan.png")
dstDockletGPEStopFilePath=os.path.join(dstDirPath, "src/pixmaps/docklet_gpe_stop.png")
dstWordPickFilePath=os.path.join(dstDirPath, "src/win32/acrobat/win32/wordPick.bmp")
origImage=gimp.pdb.gimp_file_load(srcFilePath, srcFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 128)
save_image(image, dstStarDict_s128_FilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 32)
save_image(image, dstStarDict_s32_FilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 16)
save_image(image, dstStarDict_s16_FilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 64)
save_image(image, dstStarDict_FilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "edit"), 128)
save_image(image, dstStarDictEditor_s128_FilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "edit"), 32)
save_image(image, dstStarDictEditor_s32_FilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "edit"), 16)
save_image(image, dstStarDictEditor_s16_FilePath)
image = create_icon(origImage, ("book1", "book2"),
((16, None), (32, None), (48, None), (16, 256), (32, 256), (48, 256), (256, None))
)
save_image(image, dstStarDictIconFilePath)
image = create_icon(origImage, ("book1", "book2", "edit"),
((16, None), (32, None), (48, None), (16, 256), (32, 256), (48, 256), (256, None))
)
save_image(image, dstStarDictEditorIconFilePath)
image = create_icon(origImage, ("book1", "book2", "cross"),
((16, None), (32, None), (48, None), (16, 256), (32, 256), (48, 256), (256, None))
)
save_image(image, dstStarDictUninstIconFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 32)
save_image(image, dstDockletNormalFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "search"), 32)
save_image(image, dstDockletScanFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "stop"), 32)
save_image(image, dstDockletStopFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 16)
save_image(image, dstDockletGPENormalFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "search"), 16)
save_image(image, dstDockletGPEScanFilePath)
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2", "stop"), 16)
save_image(image, dstDockletGPEStopFilePath)
# See AVToolButtonNew function in PDF API Reference
# Recommended icon size is 18x18, but it looks too small...
image = gimp.pdb.gimp_image_duplicate(origImage)
prepare_image(image, ("book1", "book2"), 22)
gimp.set_background(192, 192, 192)
gimp.pdb.gimp_layer_flatten(image.layers[0])
save_image(image, dstWordPickFilePath)
# Register the plug-in in GIMP's procedural database; it appears in the
# menu as Tools -> "stardict images".  The two parameters are the
# multilayer source image and the StarDict source-tree root directory
# that generated images are written into.
register(
    "stardict_images",
    "Create images for StarDict",
    "Create images for StarDict",
    "StarDict team",
    "GPL",
    "Mar 2011",
    "<Toolbox>/Tools/stardict images",
    "",
    [
        (PF_FILE, "src_image", "Multilayer image used as source for all other images in StarDict, "
            + "normally that is pixmaps/stardict.xcf is StarDict source tree.", None),
        (PF_DIRNAME, "stardict_dir", "Root directory of StarDict source tree. New images will be saved here.", None)
    ],
    [],
    stardict_images)

# Hand control to GIMP's plug-in main loop.
main()
| gpl-3.0 |
rajalokan/nova | nova/tests/unit/api/openstack/test_api_version_request.py | 11 | 7131 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import api_version_request
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
class APIVersionRequestTests(test.NoDBTestCase):
    """Tests for APIVersionRequest parsing, comparison and matching."""

    def test_valid_version_strings(self):
        # Well-formed "major.minor" strings parse into integer parts.
        cases = [
            ("1.1", 1, 1),
            ("2.10", 2, 10),
            ("5.234", 5, 234),
            ("12.5", 12, 5),
            ("2.0", 2, 0),
            ("2.200", 2, 200),
        ]
        for version, major, minor in cases:
            request = api_version_request.APIVersionRequest(version)
            self.assertEqual(request.ver_major, major)
            self.assertEqual(request.ver_minor, minor)

    def test_null_version(self):
        null_request = api_version_request.APIVersionRequest()
        self.assertTrue(null_request.is_null())

    def test_invalid_version_strings(self):
        # Anything that is not exactly "<int>.<int>" (no padding, no
        # whitespace, no extra components) must be rejected.
        bad_versions = ["2", "200", "2.1.4", "200.23.66.3", "5 .3",
                        "5. 3", "5.03", "02.1", "2.001", "", " 2.1",
                        "2.1 "]
        for bad in bad_versions:
            self.assertRaises(exception.InvalidAPIVersionString,
                              api_version_request.APIVersionRequest, bad)

    def test_version_comparisons(self):
        v2_0 = api_version_request.APIVersionRequest("2.0")
        v2_5 = api_version_request.APIVersionRequest("2.5")
        v5_23 = api_version_request.APIVersionRequest("5.23")
        v2_0_bis = api_version_request.APIVersionRequest("2.0")
        v_null = api_version_request.APIVersionRequest()

        # The null version sorts below every real version.
        self.assertLess(v_null, v2_5)
        self.assertLess(v2_0, v2_5)
        self.assertLessEqual(v2_0, v2_5)
        self.assertLessEqual(v2_0, v2_0_bis)
        self.assertGreater(v2_5, v_null)
        self.assertGreater(v5_23, v2_5)
        self.assertGreaterEqual(v2_0, v2_0_bis)
        self.assertGreaterEqual(v5_23, v2_5)
        self.assertNotEqual(v2_0, v2_5)
        self.assertEqual(v2_0, v2_0_bis)
        self.assertNotEqual(v2_0, v_null)
        self.assertEqual(v_null, v_null)
        # Comparing against a plain string is not supported.
        self.assertRaises(TypeError, v2_0.__lt__, "2.1")

    def test_version_matches(self):
        v2_0 = api_version_request.APIVersionRequest("2.0")
        v2_5 = api_version_request.APIVersionRequest("2.5")
        v2_45 = api_version_request.APIVersionRequest("2.45")
        v3_3 = api_version_request.APIVersionRequest("3.3")
        v3_23 = api_version_request.APIVersionRequest("3.23")
        v2_0_bis = api_version_request.APIVersionRequest("2.0")
        v3_3_bis = api_version_request.APIVersionRequest("3.3")
        v4_0 = api_version_request.APIVersionRequest("4.0")
        v_null = api_version_request.APIVersionRequest()

        # A null bound acts as "unbounded" on that side.
        self.assertTrue(v2_5.matches(v2_0, v2_45))
        self.assertTrue(v2_5.matches(v2_0, v_null))
        self.assertTrue(v2_0.matches(v2_0_bis, v2_5))
        self.assertTrue(v3_3.matches(v2_5, v3_3_bis))
        self.assertTrue(v3_3.matches(v_null, v3_3_bis))
        self.assertTrue(v3_3.matches(v_null, v4_0))
        self.assertFalse(v2_0.matches(v2_5, v2_45))
        self.assertFalse(v3_23.matches(v2_5, v3_3))
        self.assertFalse(v2_5.matches(v2_45, v2_0))
        # Matching on the null version itself is an error.
        self.assertRaises(ValueError, v_null.matches, v2_0, v2_45)

    def test_get_string(self):
        version_string = "3.23"
        request = api_version_request.APIVersionRequest(version_string)
        self.assertEqual(version_string, request.get_string())
        # The null version has no string representation.
        self.assertRaises(ValueError,
                          api_version_request.APIVersionRequest().get_string)

    def test_is_supported_min_version(self):
        req = fakes.HTTPRequest.blank('/fake', version='2.5')
        for min_version, expected in [('2.4', True), ('2.5', True),
                                      ('2.6', False)]:
            supported = api_version_request.is_supported(
                req, min_version=min_version)
            (self.assertTrue if expected else self.assertFalse)(supported)

    def test_is_supported_max_version(self):
        req = fakes.HTTPRequest.blank('/fake', version='2.5')
        for max_version, expected in [('2.4', False), ('2.5', True),
                                      ('2.6', True)]:
            supported = api_version_request.is_supported(
                req, max_version=max_version)
            (self.assertTrue if expected else self.assertFalse)(supported)

    def test_is_supported_min_and_max_version(self):
        req = fakes.HTTPRequest.blank('/fake', version='2.5')
        ranges = [
            ('2.3', '2.4', False),
            ('2.3', '2.5', True),
            ('2.3', '2.7', True),
            ('2.5', '2.7', True),
            ('2.6', '2.7', False),
            ('2.5', '2.5', True),
            ('2.10', '2.1', False),
        ]
        for min_version, max_version, expected in ranges:
            supported = api_version_request.is_supported(
                req, min_version=min_version, max_version=max_version)
            (self.assertTrue if expected else self.assertFalse)(supported)
| apache-2.0 |
palerdot/calibre | src/calibre/utils/fonts/free_type.py | 5 | 2580 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import threading
from functools import wraps
from future_builtins import map
from calibre.constants import plugins
class ThreadingViolation(Exception):
    """Raised when the freetype plugin is used from the wrong thread.

    All access must happen on the thread that called startup().
    """

    def __init__(self):
        message = ('You cannot use the freetype plugin from a thread other than the '
                ' thread in which startup() was called')
        super(ThreadingViolation, self).__init__(message)
def same_thread(func):
    """Decorator: only allow *func* to run on the object's startup thread.

    The wrapped method compares ``self.start_thread`` with the current
    thread and raises ThreadingViolation when they differ.
    """
    @wraps(func)
    def guarded(self, *args, **kwargs):
        if self.start_thread is not threading.current_thread():
            raise ThreadingViolation()
        return func(self, *args, **kwargs)
    return guarded
# Prefer the FreeTypeError type exported by the compiled plugin; fall
# back to Exception so except clauses still work if the plugin failed
# to load or predates the attribute.
FreeTypeError = getattr(plugins['freetype'][0], 'FreeTypeError', Exception)
class Face(object):
    """Wrapper around a loaded freetype face, pinned to the startup thread."""

    def __init__(self, face):
        self.start_thread = threading.current_thread()
        self.face = face
        # Expose family/style names as unicode, falling back to a repr
        # of the raw bytes when they are not valid UTF-8.
        for attr in ('family_name', 'style_name'):
            raw = getattr(self.face, attr)
            try:
                decoded = raw.decode('utf-8')
            except UnicodeDecodeError:
                decoded = repr(raw).decode('utf-8')
            setattr(self, attr, decoded)

    @same_thread
    def supports_text(self, text, has_non_printable_chars=True):
        '''
        Returns True if all the characters in text have glyphs in this font.
        '''
        if not isinstance(text, unicode):
            raise TypeError('%r is not a unicode object'%text)
        if has_non_printable_chars:
            from calibre.utils.fonts.utils import get_printable_characters
            text = get_printable_characters(text)
        # De-duplicate the code points before asking the C layer.
        code_points = tuple(frozenset(ord(ch) for ch in text))
        return self.face.supports_text(code_points)

    @same_thread
    def glyph_ids(self, text):
        # Lazily yield the glyph id of every character in *text*.
        if not isinstance(text, unicode):
            raise TypeError('%r is not a unicode object'%text)
        for ch in text:
            yield self.face.glyph_id(ord(ch))
class FreeType(object):
    """Loader for the bundled freetype plugin; use only from one thread."""

    def __init__(self):
        self.start_thread = threading.current_thread()
        plugin, load_error = plugins['freetype']
        if load_error:
            raise RuntimeError('Failed to load FreeType module with error: %s'
                % load_error)
        self.ft = plugin.FreeType()

    @same_thread
    def load_font(self, data):
        # Wrap the raw face object so the thread guard applies to it too.
        return Face(self.ft.load_font(data))
| gpl-3.0 |
niltonlk/nest-simulator | pynest/examples/spatial/test_3d.py | 14 | 2140 | # -*- coding: utf-8 -*-
#
# test_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
A spatial network in 3D
-------------------------
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Start from a clean simulation kernel.
nest.ResetKernel()

# 1000 neurons at uniformly random positions in a 1.5^3 cube.
pos = nest.spatial.free(nest.random.uniform(-0.5, 0.5), extent=[1.5, 1.5, 1.5])

l1 = nest.Create('iaf_psc_alpha', 1000, positions=pos)

# visualize
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*nest.GetPosition(l1))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b')

# full connections in box volume [-0.2,0.2]**3
nest.Connect(l1, l1,
             {'rule': 'pairwise_bernoulli',
              'p': 1.,
              'allow_autapses': False,
              'mask': {'box': {'lower_left': [-0.2, -0.2, -0.2],
                               'upper_right': [0.2, 0.2, 0.2]}}})

# show connections from center element
# sender shown in red, targets in green
ctr = nest.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*nest.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = nest.GetPosition(ctr)
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')

# Histogram of distances from the center neuron to its targets.
tgts = nest.GetTargetNodes(ctr, l1)[0]
distances = nest.Distance(ctr, l1)
# NOTE(review): i + 1 assumes node ids are 1-based and contiguous with
# the enumeration order of l1 -- confirm against the NEST node model.
tgt_distances = [d for i, d in enumerate(distances) if i + 1 in tgts]

plt.figure()
plt.hist(tgt_distances, 25)
plt.show()
| gpl-2.0 |
Jgarcia-IAS/SITE | openerp/exceptions.py | 312 | 3157 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core exceptions.
This module defines a few exception types. Those types are understood by the
RPC layer. Any other exception type bubbling until the RPC layer will be
treated as a 'Server error'.
If you consider introducing new exceptions, check out the test_exceptions addon.
"""
# kept for backward compatibility
class except_orm(Exception):
    """Legacy ORM exception carrying a (name, value) pair.

    Kept for backward compatibility; the RPC layer understands it.
    """

    def __init__(self, name, value):
        # Exception.__init__ stores args as the (name, value) tuple,
        # matching the historical explicit self.args assignment.
        super(except_orm, self).__init__(name, value)
        self.name = name
        self.value = value
# NOTE: deliberately shadows the builtin ``Warning`` -- this name is one
# of the exception types understood by the RPC layer (see module
# docstring), so it must not be renamed.
class Warning(Exception):
    pass
class RedirectWarning(Exception):
    """ Warning with a possibility to redirect the user instead of simply
    displaying the warning message.

    Should receive as parameters:
      :param int action_id: id of the action where to perform the redirection
      :param string button_text: text to put on the button that will trigger
          the redirection.
    """
class AccessDenied(Exception):
    """Login/password error.

    Carries a fixed message and a blank traceback triple so that no
    credential detail can leak to the caller.
    """

    def __init__(self):
        super(AccessDenied, self).__init__('Access denied.')
        # Blank exc_info-style triple instead of a real traceback.
        self.traceback = ('', '', '')
class AccessError(except_orm):
    """Access rights error."""

    def __init__(self, msg):
        # Tag the legacy (name, value) pair with this type's name.
        super(AccessError, self).__init__('AccessError', msg)
class MissingError(except_orm):
    """Missing record(s)."""

    def __init__(self, msg):
        # Tag the legacy (name, value) pair with this type's name.
        super(MissingError, self).__init__('MissingError', msg)
class ValidationError(except_orm):
    """Constraint/validation failure reported to the client."""

    def __init__(self, msg):
        # Historical tag is 'ValidateError', not 'ValidationError'.
        super(ValidationError, self).__init__('ValidateError', msg)
class DeferredException(Exception):
    """Exception holder for asynchronous error reporting.

    Some RPC calls (database creation and report generation) happen with
    an initial request followed by multiple polling requests.  An
    exception occurring in the thread serving the first request is
    stored here and delivered to a later polling request.

    Despite the name, ``traceback`` is really an ``exc_info()`` triple.
    """

    def __init__(self, msg, tb):
        self.message = msg
        self.traceback = tb
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
os2webscanner/os2webscanner | django-os2webscanner/os2webscanner/validate.py | 1 | 3231 | # The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Domain validation functions."""
import hashlib
import re
from urllib.error import URLError
from urllib.parse import urljoin
from urllib.request import Request, urlopen
from .models.domains.webdomain_model import WebDomain
def _do_request(url):
"""Make a request and return the data."""
try:
request = Request(url, headers={"User-Agent": "OS2Webscanner"})
r = urlopen(request)
# TODO: We get decoding error when using utf-8. But it should be utf-8 decoded.
return r.read().decode('latin1')
# except urllib2.URLError, urllib2.HTTPError:
except URLError:
return None
def _get_validation_hash(domain):
"""Return the validation hash for the domain.
The validation hash is based on the domain's organization's primary key.
"""
return hashlib.md5(str(domain.organization.pk).encode('utf-8')).hexdigest()
def get_validation_str(domain, method=None):
"""Return the validation string for the domain.
The validation string is what must be inserted by the user into a
specific file in the root of the domain. The validation string returned is
dependent on the domain's validation method and the domain's
organization.
"""
hash_str = _get_validation_hash(domain)
if method is None:
method = domain.validation_method
if method == WebDomain.ROBOTSTXT:
return "User-agent: " + hash_str + "\nDisallow:"
elif method == WebDomain.WEBSCANFILE:
return hash_str
elif method == WebDomain.METAFIELD:
return '<meta name="os2webscanner" content="' + hash_str + '" />'
def validate_domain(domain):
"""Validate a Domain by using the Domain's validation method.
Returns True if it validated or False if it did not.
"""
hash_str = _get_validation_hash(domain)
validators = {
WebDomain.ROBOTSTXT: {
"url": "/robots.txt",
"regex": "User-agent: " + hash_str + "(\r\n|\r|\n)Disallow:"
},
WebDomain.WEBSCANFILE: {
"url": "/webscan.html",
"regex": hash_str
},
WebDomain.METAFIELD: {
"url": "/",
"regex": '<meta name="os2webscanner" content="' + hash_str + '"'
}
}
validator = validators[domain.validation_method]
url = urljoin(domain.root_url, validator["url"])
r = _do_request(url)
if r is None:
return False
match = re.search(validator["regex"], r, re.I)
return match is not None
| mpl-2.0 |
nvbn/thefuck | thefuck/rules/omnienv_no_such_command.py | 1 | 1061 | import re
from thefuck.utils import (cache, for_app, replace_argument, replace_command,
which)
from subprocess import PIPE, Popen
supported_apps = 'goenv', 'nodenv', 'pyenv', 'rbenv'
enabled_by_default = any(which(a) for a in supported_apps)
COMMON_TYPOS = {
'list': ['versions', 'install --list'],
'remove': ['uninstall'],
}
@for_app(*supported_apps, at_least=1)
def match(command):
return 'env: no such command ' in command.output
def get_app_commands(app):
proc = Popen([app, 'commands'], stdout=PIPE)
return [line.decode('utf-8').strip() for line in proc.stdout.readlines()]
def get_new_command(command):
broken = re.findall(r"env: no such command ['`]([^']*)'", command.output)[0]
matched = [replace_argument(command.script, broken, common_typo)
for common_typo in COMMON_TYPOS.get(broken, [])]
app = command.script_parts[0]
app_commands = cache(which(app))(get_app_commands)(app)
matched.extend(replace_command(command, broken, app_commands))
return matched
| mit |
nextgis-extra/tests | lib_gdal/gdrivers/wmts.py | 1 | 55102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: wmts.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: WMTS driver test suite.
# Author: Even Rouault, even dot rouault at spatialys.com
#
###############################################################################
# Copyright (c) 2015, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import shutil
sys.path.append( '../pymod' )
from osgeo import gdal
import gdaltest
###############################################################################
# Find WMTS driver
def wmts_1():
gdaltest.wmts_drv = gdal.GetDriverByName('WMTS')
if gdaltest.wmts_drv is not None and gdal.GetDriverByName('WMS') is None:
print('Missing WMS driver')
gdaltest.wmts_drv = None
if gdaltest.wmts_drv is not None:
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.SetConfigOption('GDAL_DEFAULT_WMS_CACHE_PATH', '/vsimem/cache')
return 'success'
else:
return 'skip'
###############################################################################
# Error: no URL and invalid GDAL_WMTS service file documents
def wmts_2():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ds = gdal.Open('<GDAL_WMTS>')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ds = gdal.Open('<GDAL_WMTSxxx/>')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ds = gdal.Open('<GDAL_WMTS></GDAL_WMTS>')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: invalid URL
def wmts_3():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:https://non_existing')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: invalid URL
def wmts_4():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:/vsimem/non_existing')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: invalid XML in GetCapabilities response
def wmts_5():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/invalid_getcapabilities.xml', '<invalid_xml')
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:/vsimem/invalid_getcapabilities.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: invalid content in GetCapabilities response
def wmts_6():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/invalid_getcapabilities.xml', '<Capabilities/>')
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:/vsimem/invalid_getcapabilities.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: no layers
def wmts_7():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/empty_getcapabilities.xml', '<Capabilities><Contents/></Capabilities>')
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:/vsimem/empty_getcapabilities.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: missing TileMatrixSetLink and Style
def wmts_8():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/missing.xml', """<Capabilities>
<Contents>
<Layer>
<Identifier/>
</Layer>
</Contents>
</Capabilities>""")
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:/vsimem/missing.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: missing TileMatrixSet
def wmts_9():
if gdaltest.wmts_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/missing_tms.xml', """<Capabilities>
<Contents>
<Layer>
<Identifier/>
<TileMatrixSetLink>
<TileMatrixSet/>
</TileMatrixSetLink>
<Style>
<Identifier/>
</Style>
<ResourceURL format="image/png" template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.jpeg" resourceType="tile"/>
</Layer>
</Contents>
</Capabilities>""")
gdal.PushErrorHandler()
ds = gdal.Open('WMTS:/vsimem/missing_tms.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Error: Missing SupportedCRS
def wmts_10():
    """Error case: TileMatrixSet without a SupportedCRS element."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier/>
            <TileMatrixSetLink>
                <TileMatrixSet/>
            </TileMatrixSetLink>
            <Style>
                <Identifier/>
            </Style>
            <ResourceURL format="image/png" template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.jpeg" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier/>
        </TileMatrixSet>
    </Contents>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/missing_SupportedCRS.xml', xml)

    # Opening must fail; suppress the expected error messages.
    gdal.PushErrorHandler()
    ds = gdal.Open('WMTS:/vsimem/missing_SupportedCRS.xml')
    gdal.PopErrorHandler()

    if ds is None:
        return 'success'
    gdaltest.post_reason('fail')
    return 'fail'
###############################################################################
# Error: Cannot find TileMatrix in TileMatrixSet
def wmts_11():
    """Error case: TileMatrixSet defines a CRS but contains no TileMatrix."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier/>
            <TileMatrixSetLink>
                <TileMatrixSet/>
            </TileMatrixSetLink>
            <Style>
                <Identifier/>
            </Style>
            <ResourceURL format="image/png" template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.jpeg" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier/>
            <SupportedCRS>urn:ogc:def:crs:EPSG:6.18:3:3857</SupportedCRS>
        </TileMatrixSet>
    </Contents>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/no_tilematrix.xml', xml)

    # Opening must fail; suppress the expected error messages.
    gdal.PushErrorHandler()
    ds = gdal.Open('WMTS:/vsimem/no_tilematrix.xml')
    gdal.PopErrorHandler()

    if ds is None:
        return 'success'
    gdaltest.post_reason('fail')
    return 'fail'
###############################################################################
# Error: Missing required element in TileMatrix element
def wmts_12():
    """Error case: TileMatrix element missing its required children."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier/>
            <TileMatrixSetLink>
                <TileMatrixSet/>
            </TileMatrixSetLink>
            <Style>
                <Identifier/>
            </Style>
            <ResourceURL format="image/png" template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.jpeg" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier/>
            <SupportedCRS>urn:ogc:def:crs:EPSG:6.18:3:3857</SupportedCRS>
            <TileMatrix/>
        </TileMatrixSet>
    </Contents>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/missing_required_element_in_tilematrix.xml', xml)

    # Opening must fail; suppress the expected error messages.
    gdal.PushErrorHandler()
    ds = gdal.Open('WMTS:/vsimem/missing_required_element_in_tilematrix.xml')
    gdal.PopErrorHandler()

    if ds is None:
        return 'success'
    gdaltest.post_reason('fail')
    return 'fail'
###############################################################################
# Error: Missing ResourceURL
def wmts_12bis():
    """Error case: layer with a valid tile matrix set but no ResourceURL."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier/>
            <TileMatrixSetLink>
                <TileMatrixSet/>
            </TileMatrixSetLink>
            <Style>
                <Identifier/>
            </Style>
        </Layer>
        <TileMatrixSet>
            <Identifier/>
            <SupportedCRS>urn:ogc:def:crs:EPSG:6.18:3:3857</SupportedCRS>
            <TileMatrix>
                <Identifier>0</Identifier>
                <ScaleDenominator>559082264.029</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/wmts_12bis.xml', xml)

    # Opening must fail; suppress the expected error messages.
    gdal.PushErrorHandler()
    ds = gdal.Open('WMTS:/vsimem/wmts_12bis.xml')
    gdal.PopErrorHandler()

    if ds is None:
        return 'success'
    gdaltest.post_reason('fail')
    return 'fail'
###############################################################################
# Minimal
def wmts_13():
    """Open a minimal valid WMTS capabilities document and exercise the basic
    dataset contract: size, geotransform, SRS, band layout, overviews,
    LocationInfo metadata, connection-string variants, and actual tile reads.
    """
    if gdaltest.wmts_drv is None:
        return 'skip'
    # Minimal capabilities: one layer, one style, one 1x1 tile matrix.
    gdal.FileFromMemBuffer('/vsimem/minimal.xml', """<Capabilities>
    <Contents>
        <Layer>
            <Identifier/>
            <TileMatrixSetLink>
                <TileMatrixSet/>
            </TileMatrixSetLink>
            <Style>
                <Identifier/>
            </Style>
            <ResourceURL format="image/png" template="/vsimem/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier/>
            <SupportedCRS>urn:ogc:def:crs:EPSG:6.18:3:3857</SupportedCRS>
            <TileMatrix>
                <Identifier>0</Identifier>
                <ScaleDenominator>559082264.029</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
</Capabilities>""")
    ds = gdal.Open('WMTS:/vsimem/minimal.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # Single 256x256 tile -> dataset is exactly one tile wide and high.
    if ds.RasterXSize != 256:
        gdaltest.post_reason('fail')
        return 'fail'
    if ds.RasterYSize != 256:
        gdaltest.post_reason('fail')
        return 'fail'
    # Full Web-Mercator extent with one 256-pixel tile.
    got_gt = ds.GetGeoTransform()
    expected_gt = (-20037508.342799999, 156543.03392811998, 0.0, 20037508.342799999, 0.0, -156543.03392811998)
    for i in range(6):
        if abs(got_gt[i] - expected_gt[i]) > 1e-8:
            gdaltest.post_reason('fail')
            print(got_gt)
            return 'fail'
    if ds.GetProjectionRef().find('3857') < 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # The driver always exposes RGBA.
    if ds.RasterCount != 4:
        gdaltest.post_reason('fail')
        return 'fail'
    for i in range(4):
        if ds.GetRasterBand(i+1).GetColorInterpretation() != gdal.GCI_RedBand + i:
            gdaltest.post_reason('fail')
            return 'fail'
    # A single zoom level means no overviews.
    if ds.GetRasterBand(1).GetOverviewCount() != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    if ds.GetRasterBand(1).GetOverview(0) is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    # No tile file exists yet: checksum of the (empty) data must be 0.
    gdal.PushErrorHandler()
    cs = ds.GetRasterBand(1).Checksum()
    gdal.PopErrorHandler()
    if cs != 0:
        gdaltest.post_reason('fail')
        return 'fail'
    # Only one layer/style/TMS combination -> no subdatasets advertised.
    if ds.GetSubDatasets() != []:
        gdaltest.post_reason('fail')
        print(ds.GetSubDatasets())
        return 'fail'
    # No FeatureInfo ResourceURL -> LocationInfo queries return nothing.
    if ds.GetRasterBand(1).GetMetadataItem('Pixel_0_0', 'LocationInfo') is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    if ds.GetRasterBand(1).GetMetadataItem('foo') is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    # Empty layer/style/tilematrixset selectors must match the empty identifiers.
    for connection_str in [ 'WMTS:/vsimem/minimal.xml,layer=',
                            'WMTS:/vsimem/minimal.xml,style=',
                            'WMTS:/vsimem/minimal.xml,tilematrixset=',
                            'WMTS:/vsimem/minimal.xml,layer=,style=,tilematrixset=' ]:
        ds = gdal.Open(connection_str)
        if ds is None:
            gdaltest.post_reason('fail')
            print(connection_str)
            return 'fail'
        ds = None
    # Selectors naming non-existent identifiers must fail.
    for connection_str in [ 'WMTS:/vsimem/minimal.xml,layer=foo',
                            'WMTS:/vsimem/minimal.xml,style=bar',
                            'WMTS:/vsimem/minimal.xml,tilematrixset=baz' ]:
        gdal.PushErrorHandler()
        ds = gdal.Open(connection_str)
        gdal.PopErrorHandler()
        if ds is not None:
            gdaltest.post_reason('fail')
            print(connection_str)
            return 'fail'
        ds = None
    ds = gdal.Open('WMTS:/vsimem/minimal.xml')
    # Create the actual tile at the location the ResourceURL template points to,
    # then verify the WMTS dataset serves exactly that content.
    tmp_ds = gdal.GetDriverByName('MEM').Create('',256,256,4)
    for i in range(4):
        tmp_ds.GetRasterBand(i+1).Fill((i+1)*255/4)
    tmp_ds = gdal.GetDriverByName('PNG').CreateCopy('/vsimem/0/0/0.png', tmp_ds)
    for i in range(4):
        cs = ds.GetRasterBand(i+1).Checksum()
        if cs != tmp_ds.GetRasterBand(i+1).Checksum():
            gdaltest.post_reason('fail')
            return 'fail'
    # Whole-dataset and single-band reads must match the reference tile.
    ref_data = tmp_ds.ReadRaster(0,0,256,256)
    got_data = ds.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize,256,256)
    if ref_data != got_data:
        gdaltest.post_reason('fail')
        return 'fail'
    ref_data = tmp_ds.GetRasterBand(1).ReadRaster(0,0,256,256)
    got_data = ds.GetRasterBand(1).ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize,256,256)
    if ref_data != got_data:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None
    wmts_CleanCache()
    return 'success'
###############################################################################
# Nominal RESTful
def wmts_14():
    """Nominal RESTful WMTS service: subdataset enumeration, CreateCopy to a
    GDAL_WMTS service description file, LocationInfo (FeatureInfo) queries
    including response caching, and the various ways of opening the service
    (WMTS: prefix, inline <GDAL_WMTS> XML, OpenEx open options).
    """
    if gdaltest.wmts_drv is None:
        return 'skip'
    # Two tile matrix sets and two styles -> 4 subdatasets expected below.
    # The default style identifier deliberately contains '=' to exercise
    # quoting in the subdataset connection strings.
    gdal.FileFromMemBuffer('/vsimem/nominal.xml', """<Capabilities>
    <Contents>
        <Layer>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <Abstract>My abstract</Abstract>
            <ows:WGS84BoundingBox>
                <ows:LowerCorner>-180 -85.0511287798065</ows:LowerCorner>
                <ows:UpperCorner>180 85.0511287798065</ows:UpperCorner>
            </ows:WGS84BoundingBox>
            <Dimension>
                <ows:Identifier>time</ows:Identifier>
                <UOM>ISO8601</UOM>
                <Default>2011-10-04</Default>
                <Current>false</Current>
                <Value>2002-06-01/2011-10-04/P1D</Value>
            </Dimension>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
            </TileMatrixSetLink>
            <TileMatrixSetLink>
                <TileMatrixSet>another_tms</TileMatrixSet>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>style=auto</Identifier>
                <Title>Default style</Title>
            </Style>
            <Style>
                <Identifier>another_style</Identifier>
                <Title>Another style</Title>
            </Style>
            <ResourceURL format="image/png"
    template="/vsimem/{time}/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
            <ResourceURL format="text/plain"
    template="/vsimem/{time}/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}/{J}/{I}.txt" resourceType="FeatureInfo"/>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <SupportedCRS>urn:ogc:def:crs:EPSG:6.18:3:3857</SupportedCRS>
            <TileMatrix>
                <Identifier>0</Identifier>
                <ScaleDenominator>559082264.029</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>18</ows:Identifier>
                <ScaleDenominator>2132.72958385</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>262144</MatrixWidth>
                <MatrixHeight>262144</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>24</ows:Identifier>
                <ScaleDenominator>33.3238997477</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>16777216</MatrixWidth>
                <MatrixHeight>16777216</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
        <TileMatrixSet>
            <Identifier>another_tms</Identifier>
            <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
            <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
                <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/nominal.xml"/>
</Capabilities>""")
    ds = gdal.Open('WMTS:/vsimem/nominal.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # 2 tile matrix sets x 2 styles = 4 subdatasets; the 'style=auto'
    # identifier must be quoted in the connection strings.
    if ds.GetSubDatasets() != [('WMTS:/vsimem/nominal.xml,layer=lyr1,tilematrixset=tms,style="style=auto"',
                                'Layer My layer1, tile matrix set tms, style "Default style"'),
                               ('WMTS:/vsimem/nominal.xml,layer=lyr1,tilematrixset=tms,style=another_style',
                                'Layer My layer1, tile matrix set tms, style "Another style"'),
                               ('WMTS:/vsimem/nominal.xml,layer=lyr1,tilematrixset=another_tms,style="style=auto"',
                                'Layer My layer1, tile matrix set another_tms, style "Default style"'),
                               ('WMTS:/vsimem/nominal.xml,layer=lyr1,tilematrixset=another_tms,style=another_style',
                                'Layer My layer1, tile matrix set another_tms, style "Another style"')]:
        gdaltest.post_reason('fail')
        print(ds.GetSubDatasets())
        return 'fail'
    # Size is taken from the most detailed tile matrix (zoom 24 would be too
    # big; zoom 18: 262144 tiles * 256 px = 67108864).
    if ds.RasterXSize != 67108864:
        gdaltest.post_reason('fail')
        return 'fail'
    # The FeatureInfo document does not exist yet: empty result expected.
    gdal.PushErrorHandler()
    res = ds.GetRasterBand(1).GetMetadataItem('Pixel_1_2', 'LocationInfo')
    gdal.PopErrorHandler()
    if res != '':
        gdaltest.post_reason('fail')
        print(res)
        return 'fail'
    if ds.GetMetadata() != {'ABSTRACT': 'My abstract', 'TITLE': 'My layer1'}:
        gdaltest.post_reason('fail')
        print(ds.GetMetadata())
        return 'fail'
    # CreateCopy from a non-WMTS source must fail (errors suppressed)...
    gdal.PushErrorHandler()
    gdaltest.wmts_drv.CreateCopy('/vsimem/gdal_nominal.xml', gdal.GetDriverByName('MEM').Create('',1,1))
    gdal.PopErrorHandler()
    # ...but from a WMTS dataset it writes a GDAL_WMTS service description.
    gdaltest.wmts_drv.CreateCopy('/vsimem/gdal_nominal.xml', ds)
    ds = None
    f = gdal.VSIFOpenL('/vsimem/gdal_nominal.xml', 'rb')
    data = gdal.VSIFReadL(1, 10000, f).decode('ascii')
    gdal.VSIFCloseL(f)
    # Exact serialized service description expected.
    if data != """<GDAL_WMTS>
  <GetCapabilitiesUrl>/vsimem/nominal.xml</GetCapabilitiesUrl>
  <Layer>lyr1</Layer>
  <Style>style=auto</Style>
  <TileMatrixSet>tms</TileMatrixSet>
  <DataWindow>
    <UpperLeftX>-20037508.3428</UpperLeftX>
    <UpperLeftY>20037508.3428</UpperLeftY>
    <LowerRightX>20037508.34278254</LowerRightX>
    <LowerRightY>-20037508.34278254</LowerRightY>
  </DataWindow>
  <BandsCount>4</BandsCount>
  <Cache />
  <UnsafeSSL>true</UnsafeSSL>
  <ZeroBlockHttpCodes>204,404</ZeroBlockHttpCodes>
  <ZeroBlockOnServerException>true</ZeroBlockOnServerException>
</GDAL_WMTS>
""":
        gdaltest.post_reason('fail')
        print(data)
        return 'fail'
    ds = gdal.Open('/vsimem/gdal_nominal.xml')
    # Provide a FeatureInfo document at the templated location ({time} filled
    # with the Default dimension value) and query it.
    gdal.FileFromMemBuffer('/vsimem/2011-10-04/style=auto/tms/18/0/0/2/1.txt', 'foo')
    res = ds.GetRasterBand(1).GetMetadataItem('Pixel_1_2', 'LocationInfo')
    if res != '<LocationInfo>foo</LocationInfo>':
        gdaltest.post_reason('fail')
        print(res)
        return 'fail'
    # Second identical query: served from the cached response.
    res = ds.GetRasterBand(1).GetMetadataItem('Pixel_1_2', 'LocationInfo')
    if res != '<LocationInfo>foo</LocationInfo>':
        gdaltest.post_reason('fail')
        print(res)
        return 'fail'
    # Inline GDAL_WMTS service description must also open.
    ds = gdal.Open('<GDAL_WMTS><GetCapabilitiesUrl>/vsimem/nominal.xml</GetCapabilitiesUrl></GDAL_WMTS>')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = gdal.Open('WMTS:/vsimem/gdal_nominal.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # OpenEx with valid open options.
    for open_options in [ ['URL=/vsimem/nominal.xml'],
                          ['URL=/vsimem/nominal.xml', 'STYLE=style=auto', 'TILEMATRIXSET=tms'] ]:
        ds = gdal.OpenEx('WMTS:', open_options = open_options)
        if ds is None:
            gdaltest.post_reason('fail')
            return 'fail'
    # OpenEx with non-existent style/tilematrixset must fail.
    for open_options in [ ['URL=/vsimem/nominal.xml', 'STYLE=x', 'TILEMATRIXSET=y'] ]:
        gdal.PushErrorHandler()
        ds = gdal.OpenEx('WMTS:', open_options = open_options)
        gdal.PopErrorHandler()
        if ds is not None:
            gdaltest.post_reason('fail')
            return 'fail'
    ds = gdal.Open('WMTS:/vsimem/nominal.xml')
    # XML FeatureInfo responses are embedded as parsed XML, not text.
    gdal.FileFromMemBuffer('/vsimem/2011-10-04/style=auto/tms/18/0/0/2/1.txt', '<?xml version="1.0" encoding="UTF-8"?><xml_content/>')
    res = ds.GetRasterBand(1).GetMetadataItem('Pixel_1_2', 'LocationInfo')
    if res != """<LocationInfo><xml_content />
</LocationInfo>""":
        gdaltest.post_reason('fail')
        print(res)
        return 'fail'
    return 'success'
###############################################################################
# Nominal KVP
def wmts_15():
    """Nominal KVP-encoded WMTS service: GetTile/GetFeatureInfo requests are
    built from the OperationsMetadata endpoints instead of RESTful
    ResourceURL templates; also tests CreateCopy round-trip and tile reads.
    """
    if gdaltest.wmts_drv is None:
        return 'skip'
    # The capabilities advertise KVP endpoints for GetCapabilities, GetTile
    # and GetFeatureInfo; the layer lists Format/InfoFormat instead of
    # ResourceURL elements.
    gdal.FileFromMemBuffer('/vsimem/nominal_kvp.xml?service=WMTS&request=GetCapabilities', """<Capabilities xmlns="http://www.opengis.net/wmts/1.0">
    <ows:OperationsMetadata>
        <ows:Operation name="GetCapabilities">
            <ows:DCP>
                <ows:HTTP>
                    <ows:Get xlink:href="/vsimem/nominal_kvp.xml?">
                        <ows:Constraint name="GetEncoding">
                            <ows:AllowedValues>
                                <ows:Value>KVP</ows:Value>
                            </ows:AllowedValues>
                        </ows:Constraint>
                    </ows:Get>
                </ows:HTTP>
            </ows:DCP>
        </ows:Operation>
        <ows:Operation name="GetTile">
            <ows:DCP>
                <ows:HTTP>
                    <ows:Get xlink:href="/vsimem/nominal_kvp.xml?">
                    </ows:Get>
                </ows:HTTP>
            </ows:DCP>
        </ows:Operation>
        <ows:Operation name="GetFeatureInfo">
            <ows:DCP>
                <ows:HTTP>
                    <ows:Get xlink:href="/vsimem/nominal_kvp.xml?">
                    </ows:Get>
                </ows:HTTP>
            </ows:DCP>
        </ows:Operation>
    </ows:OperationsMetadata>
    <Contents>
        <Layer>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <ows:BoundingBox crs="urn:ogc:def:crs:EPSG:6.18:3:3857">
                <ows:LowerCorner>-20037508.3428 -20037508.3428</ows:LowerCorner>
                <ows:UpperCorner>20037508.3428 20037508.3428</ows:UpperCorner>
            </ows:BoundingBox>
            <Dimension>
                <ows:Identifier>time</ows:Identifier>
                <UOM>ISO8601</UOM>
                <Default>2011-10-04</Default>
                <Current>false</Current>
                <Value>2002-06-01/2011-10-04/P1D</Value>
            </Dimension>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <Format>image/jpeg</Format>
            <Format>image/png</Format>
            <InfoFormat>text/plain</InfoFormat>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <ows:BoundingBox crs="urn:ogc:def:crs:EPSG:6.18:3:3857">
                <ows:LowerCorner>-20037508.3428 -20037508.3428</ows:LowerCorner>
                <ows:UpperCorner>20037508.3428 20037508.3428</ows:UpperCorner>
            </ows:BoundingBox>
            <SupportedCRS>urn:ogc:def:crs:EPSG:6.18:3:3857</SupportedCRS>
            <TileMatrix>
                <Identifier>0</Identifier>
                <ScaleDenominator>559082264.029</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>18</ows:Identifier>
                <ScaleDenominator>2132.72958385</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>262144</MatrixWidth>
                <MatrixHeight>262144</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>24</ows:Identifier>
                <ScaleDenominator>33.3238997477</ScaleDenominator>
                <TopLeftCorner>-20037508.3428 20037508.3428</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>16777216</MatrixWidth>
                <MatrixHeight>16777216</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
</Capabilities>""")
    # A GetCapabilities KVP URL is itself a valid connection string.
    ds = gdal.Open('/vsimem/nominal_kvp.xml?service=WMTS&request=GetCapabilities')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # Size from zoom level 18: 262144 tiles * 256 px.
    if ds.RasterXSize != 67108864:
        gdaltest.post_reason('fail')
        return 'fail'
    # No FeatureInfo document available yet -> empty answer.
    gdal.PushErrorHandler()
    res = ds.GetRasterBand(1).GetMetadataItem('Pixel_1_2', 'LocationInfo')
    gdal.PopErrorHandler()
    if res != '':
        gdaltest.post_reason('fail')
        print(res)
        return 'fail'
    gdaltest.wmts_drv.CreateCopy('/vsimem/gdal_nominal_kvp.xml', ds)
    ds = None
    ds = gdal.Open('/vsimem/gdal_nominal_kvp.xml')
    # Provide the response at the exact KVP GetFeatureInfo URL the driver builds.
    gdal.FileFromMemBuffer('/vsimem/nominal_kvp.xml?service=WMTS&request=GetFeatureInfo&version=1.0.0&layer=lyr1&style=default_style&InfoFormat=text/plain&TileMatrixSet=tms&TileMatrix=18&TileRow=0&TileCol=0&J=2&I=1&time=2011-10-04', 'bar')
    res = ds.GetRasterBand(1).GetMetadataItem('Pixel_1_2', 'LocationInfo')
    if res != '<LocationInfo>bar</LocationInfo>':
        gdaltest.post_reason('fail')
        print(res)
        return 'fail'
    ds = gdal.Open('WMTS:/vsimem/gdal_nominal_kvp.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # Provide the zoom-0 tile at the KVP GetTile URL and verify reads; the
    # full-resolution band exposes it through overview 0 here.
    tmp_ds = gdal.GetDriverByName('MEM').Create('',256,256,4)
    for i in range(4):
        tmp_ds.GetRasterBand(i+1).Fill((i+1)*255/4)
    tmp_ds = gdal.GetDriverByName('PNG').CreateCopy('/vsimem/nominal_kvp.xml?service=WMTS&request=GetTile&version=1.0.0&layer=lyr1&style=default_style&format=image/png&TileMatrixSet=tms&TileMatrix=0&TileRow=0&TileCol=0&time=2011-10-04', tmp_ds)
    for i in range(4):
        cs = ds.GetRasterBand(i+1).GetOverview(0).Checksum()
        if cs != tmp_ds.GetRasterBand(i+1).Checksum():
            gdaltest.post_reason('fail')
            return 'fail'
    # Downsampled whole-dataset reads must match the reference tile.
    ref_data = tmp_ds.ReadRaster(0,0,256,256)
    got_data = ds.ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize,256,256)
    if ref_data != got_data:
        gdaltest.post_reason('fail')
        return 'fail'
    ref_data = tmp_ds.GetRasterBand(1).ReadRaster(0,0,256,256)
    got_data = ds.GetRasterBand(1).ReadRaster(0,0,ds.RasterXSize,ds.RasterYSize,256,256)
    if ref_data != got_data:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None
    wmts_CleanCache()
    return 'success'
###############################################################################
# AOI from layer WGS84BoundingBox
def wmts_16():
    """Area of interest derived from the layer's WGS84BoundingBox element."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <ows:WGS84BoundingBox>
                <ows:LowerCorner>-90 0</ows:LowerCorner>
                <ows:UpperCorner>90 90</ows:UpperCorner>
            </ows:WGS84BoundingBox>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <ResourceURL format="image/png"
    template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
            <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
                <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:1</ows:Identifier>
                <ScaleDenominator>2.795411320143589E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>2</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:2</ows:Identifier>
                <ScaleDenominator>1.397705660071794E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>4</MatrixWidth>
                <MatrixHeight>2</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/wmts_16.xml"/>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/wmts_16.xml', xml)

    ds = gdal.Open('WMTS:/vsimem/wmts_16.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'

    # The bounding box restricts the dataset to the upper-right quadrant.
    for got, expected in [(ds.RasterXSize, 512), (ds.RasterYSize, 256)]:
        if got != expected:
            gdaltest.post_reason('fail')
            print(got)
            return 'fail'

    gt = ds.GetGeoTransform()
    ref_gt = (-90, 0.3515625, 0.0, 90.0, 0.0, -0.3515625)
    if any(abs(a - b) > 1e-8 for a, b in zip(gt, ref_gt)):
        gdaltest.post_reason('fail')
        print(gt)
        return 'fail'

    # Geographic CRS without an axis-order AXIS node.
    wkt = ds.GetProjectionRef()
    if '4326' not in wkt or 'AXIS' in wkt:
        gdaltest.post_reason('fail')
        print(wkt)
        return 'fail'
    return 'success'
###############################################################################
# AOI from layer BoundingBox
def wmts_17():
    """Area of interest derived from the layer's CRS-specific BoundingBox."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    # BoundingBox corners are in EPSG:4326 axis order (lat first).
    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <ows:BoundingBox crs="urn:ogc:def:crs:EPSG::4326">
                <ows:LowerCorner>0 -90</ows:LowerCorner>
                <ows:UpperCorner>90 90</ows:UpperCorner>
            </ows:BoundingBox>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <ResourceURL format="image/png"
    template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
            <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
                <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:1</ows:Identifier>
                <ScaleDenominator>2.795411320143589E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>2</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:2</ows:Identifier>
                <ScaleDenominator>1.397705660071794E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>4</MatrixWidth>
                <MatrixHeight>2</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/wmts_17.xml"/>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/wmts_17.xml', xml)

    ds = gdal.Open('WMTS:/vsimem/wmts_17.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'

    for got, expected in [(ds.RasterXSize, 512), (ds.RasterYSize, 256)]:
        if got != expected:
            gdaltest.post_reason('fail')
            print(got)
            return 'fail'

    gt = ds.GetGeoTransform()
    ref_gt = (-90, 0.3515625, 0.0, 90.0, 0.0, -0.3515625)
    if any(abs(a - b) > 1e-8 for a, b in zip(gt, ref_gt)):
        gdaltest.post_reason('fail')
        print(gt)
        return 'fail'

    wkt = ds.GetProjectionRef()
    if '4326' not in wkt or 'AXIS' in wkt:
        gdaltest.post_reason('fail')
        print(wkt)
        return 'fail'
    return 'success'
###############################################################################
# AOI from TileMatrixSet BoundingBox
def wmts_18():
    """Area of interest derived from the TileMatrixSet's BoundingBox."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    # The layer itself advertises no extent; the TileMatrixSet BoundingBox
    # is the only source for the AOI.
    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <ResourceURL format="image/png"
    template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
            <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
            <ows:BoundingBox crs="urn:ogc:def:crs:EPSG::4326">
                <ows:LowerCorner>0 -90</ows:LowerCorner>
                <ows:UpperCorner>90 90</ows:UpperCorner>
            </ows:BoundingBox>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
                <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:1</ows:Identifier>
                <ScaleDenominator>2.795411320143589E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>2</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:2</ows:Identifier>
                <ScaleDenominator>1.397705660071794E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>4</MatrixWidth>
                <MatrixHeight>2</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/wmts_18.xml"/>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/wmts_18.xml', xml)

    ds = gdal.Open('WMTS:/vsimem/wmts_18.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'

    for got, expected in [(ds.RasterXSize, 512), (ds.RasterYSize, 256)]:
        if got != expected:
            gdaltest.post_reason('fail')
            print(got)
            return 'fail'

    gt = ds.GetGeoTransform()
    ref_gt = (-90, 0.3515625, 0.0, 90.0, 0.0, -0.3515625)
    if any(abs(a - b) > 1e-8 for a, b in zip(gt, ref_gt)):
        gdaltest.post_reason('fail')
        print(gt)
        return 'fail'

    wkt = ds.GetProjectionRef()
    if '4326' not in wkt or 'AXIS' in wkt:
        gdaltest.post_reason('fail')
        print(wkt)
        return 'fail'
    return 'success'
###############################################################################
# AOI from TileMatrixSetLimits
def wmts_19():
    """Area of interest derived from the layer's TileMatrixSetLimits."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    # No bounding box anywhere: the AOI comes from the tile range limits
    # declared for the most detailed tile matrix (GoogleCRS84Quad:2).
    xml = """<Capabilities>
    <Contents>
        <Layer>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
                <TileMatrixSetLimits>
                    <TileMatrixLimits>
                        <TileMatrix>GoogleCRS84Quad:2</TileMatrix>
                        <MinTileRow>0</MinTileRow>
                        <MaxTileRow>0</MaxTileRow>
                        <MinTileCol>1</MinTileCol>
                        <MaxTileCol>2</MaxTileCol>
                    </TileMatrixLimits>
                </TileMatrixSetLimits>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <ResourceURL format="image/png"
    template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
            <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
                <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:1</ows:Identifier>
                <ScaleDenominator>2.795411320143589E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>2</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:2</ows:Identifier>
                <ScaleDenominator>1.397705660071794E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>4</MatrixWidth>
                <MatrixHeight>2</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/wmts_19.xml"/>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/wmts_19.xml', xml)

    ds = gdal.Open('WMTS:/vsimem/wmts_19.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'

    for got, expected in [(ds.RasterXSize, 512), (ds.RasterYSize, 256)]:
        if got != expected:
            gdaltest.post_reason('fail')
            print(got)
            return 'fail'

    gt = ds.GetGeoTransform()
    ref_gt = (-90, 0.3515625, 0.0, 90.0, 0.0, -0.3515625)
    if any(abs(a - b) > 1e-8 for a, b in zip(gt, ref_gt)):
        gdaltest.post_reason('fail')
        print(gt)
        return 'fail'

    wkt = ds.GetProjectionRef()
    if '4326' not in wkt or 'AXIS' in wkt:
        gdaltest.post_reason('fail')
        print(wkt)
        return 'fail'
    return 'success'
###############################################################################
# AOI from layer BoundingBox but restricted with TileMatrixSetLimits
def wmts_20():
    """AOI from the layer BoundingBox, then restricted by TileMatrixSetLimits."""
    if gdaltest.wmts_drv is None:
        return 'skip'

    # The layer claims the whole globe, but the tile range limits on
    # GoogleCRS84Quad:2 shrink the effective extent.
    xml = """<Capabilities>
    <Contents>
        <Layer>
            <ows:BoundingBox crs="urn:ogc:def:crs:EPSG::4326">
                <ows:LowerCorner>-90 -180</ows:LowerCorner>
                <ows:UpperCorner>90 180</ows:UpperCorner>
            </ows:BoundingBox>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
                <TileMatrixSetLimits>
                    <TileMatrixLimits>
                        <TileMatrix>GoogleCRS84Quad:2</TileMatrix>
                        <MinTileRow>0</MinTileRow>
                        <MaxTileRow>0</MaxTileRow>
                        <MinTileCol>1</MinTileCol>
                        <MaxTileCol>2</MaxTileCol>
                    </TileMatrixLimits>
                </TileMatrixSetLimits>
            </TileMatrixSetLink>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <ResourceURL format="image/png"
    template="/vsimem/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
        <TileMatrixSet>
            <Identifier>tms</Identifier>
            <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
            <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
                <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>1</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:1</ows:Identifier>
                <ScaleDenominator>2.795411320143589E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>2</MatrixWidth>
                <MatrixHeight>1</MatrixHeight>
            </TileMatrix>
            <TileMatrix>
                <ows:Identifier>GoogleCRS84Quad:2</ows:Identifier>
                <ScaleDenominator>1.397705660071794E8</ScaleDenominator>
                <TopLeftCorner>90.0 -180.0</TopLeftCorner>
                <TileWidth>256</TileWidth>
                <TileHeight>256</TileHeight>
                <MatrixWidth>4</MatrixWidth>
                <MatrixHeight>2</MatrixHeight>
            </TileMatrix>
        </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/wmts_20.xml"/>
</Capabilities>"""
    gdal.FileFromMemBuffer('/vsimem/wmts_20.xml', xml)

    ds = gdal.Open('WMTS:/vsimem/wmts_20.xml')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'

    for got, expected in [(ds.RasterXSize, 512), (ds.RasterYSize, 256)]:
        if got != expected:
            gdaltest.post_reason('fail')
            print(got)
            return 'fail'

    gt = ds.GetGeoTransform()
    ref_gt = (-90, 0.3515625, 0.0, 90.0, 0.0, -0.3515625)
    if any(abs(a - b) > 1e-8 for a, b in zip(gt, ref_gt)):
        gdaltest.post_reason('fail')
        print(gt)
        return 'fail'

    wkt = ds.GetProjectionRef()
    if '4326' not in wkt or 'AXIS' in wkt:
        gdaltest.post_reason('fail')
        print(wkt)
        return 'fail'
    return 'success'
###############################################################################
# Test ExtendBeyondDateLine
def wmts_21():
    """Test the ExtendBeyondDateLine open option of the WMTS driver.

    Builds an in-memory capabilities document whose layer advertises a
    CRS84 bounding box crossing the anti-meridian, opens it with
    extendbeyonddateline=yes, and checks that the resulting dataset is
    shifted past the dateline: its geotransform starts at longitude 90 and
    the tile columns wrap around (column 3 is served first, column 0 next).
    """
    if gdaltest.wmts_drv is None:
        return 'skip'
    # NOTE(review): the OGC:2:84 bounding box below is deliberately
    # artificial (see the comment embedded in the XML); it only exists to
    # trigger the ExtendBeyondDateLine code path.
    gdal.FileFromMemBuffer('/vsimem/wmts_21.xml', """<Capabilities>
    <Contents>
        <Layer>
            <ows:BoundingBox crs="urn:ogc:def:crs:EPSG::4326">
                <ows:LowerCorner>-90 -180</ows:LowerCorner>
                <ows:UpperCorner>0 180</ows:UpperCorner>
            </ows:BoundingBox>
            <!-- completely made-up case and not really representative... -->
            <ows:BoundingBox crs="urn:ogc:def:crs:OGC:2:84">
                <ows:LowerCorner>90 -90</ows:LowerCorner>
                <ows:UpperCorner>-90 0</ows:UpperCorner>
            </ows:BoundingBox>
            <Identifier>lyr1</Identifier>
            <Title>My layer1</Title>
            <Style isDefault="true">
                <Identifier>default_style</Identifier>
                <Title>Default style</Title>
            </Style>
            <TileMatrixSetLink>
                <TileMatrixSet>tms</TileMatrixSet>
            </TileMatrixSetLink>
            <ResourceURL format="image/png"
template="/vsimem/wmts_21/{Style}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" resourceType="tile"/>
        </Layer>
    <TileMatrixSet>
        <Identifier>tms</Identifier>
        <ows:Identifier>GoogleCRS84Quad</ows:Identifier>
        <ows:SupportedCRS>urn:ogc:def:crs:EPSG::4326</ows:SupportedCRS>
        <TileMatrix>
            <ows:Identifier>GoogleCRS84Quad:0</ows:Identifier>
            <ScaleDenominator>5.590822640287178E8</ScaleDenominator>
            <TopLeftCorner>90.0 -180.0</TopLeftCorner>
            <TileWidth>256</TileWidth>
            <TileHeight>256</TileHeight>
            <MatrixWidth>1</MatrixWidth>
            <MatrixHeight>1</MatrixHeight>
        </TileMatrix>
        <TileMatrix>
            <ows:Identifier>GoogleCRS84Quad:1</ows:Identifier>
            <ScaleDenominator>2.795411320143589E8</ScaleDenominator>
            <TopLeftCorner>90.0 -180.0</TopLeftCorner>
            <TileWidth>256</TileWidth>
            <TileHeight>256</TileHeight>
            <MatrixWidth>2</MatrixWidth>
            <MatrixHeight>1</MatrixHeight>
        </TileMatrix>
        <TileMatrix>
            <ows:Identifier>GoogleCRS84Quad:2</ows:Identifier>
            <ScaleDenominator>1.397705660071794E8</ScaleDenominator>
            <TopLeftCorner>90.0 -180.0</TopLeftCorner>
            <TileWidth>256</TileWidth>
            <TileHeight>256</TileHeight>
            <MatrixWidth>4</MatrixWidth>
            <MatrixHeight>2</MatrixHeight>
        </TileMatrix>
    </TileMatrixSet>
    </Contents>
    <ServiceMetadataURL xlink:href="/vsimem/wmts_21.xml"/>
</Capabilities>""")
    ds = gdal.Open('WMTS:/vsimem/wmts_21.xml,extendbeyonddateline=yes')
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if ds.RasterXSize != 512:
        gdaltest.post_reason('fail')
        print(ds.RasterXSize)
        return 'fail'
    if ds.RasterYSize != 256:
        gdaltest.post_reason('fail')
        print(ds.RasterYSize)
        return 'fail'
    got_gt = ds.GetGeoTransform()
    # Origin at longitude 90: the dataset extends beyond the dateline.
    expected_gt = (90, 0.3515625, 0.0, 0.0, 0.0, -0.3515625)
    for i in range(6):
        if abs(got_gt[i] - expected_gt[i]) > 1e-8:
            gdaltest.post_reason('fail')
            print(got_gt)
            return 'fail'
    if ds.GetProjectionRef().find('4326') < 0 or ds.GetProjectionRef().find('AXIS') >= 0:
        gdaltest.post_reason('fail')
        print(ds.GetProjectionRef())
        return 'fail'
    # Pre-create the two PNG tiles the driver is expected to fetch:
    # tile column 3 (filled with 64) and tile column 0 (filled with 128)
    # of tile matrix level 2, row 1.
    tmp_ds = gdal.GetDriverByName('MEM').Create('',256,256,4)
    for i in range(4):
        tmp_ds.GetRasterBand(i+1).Fill(64)
    tmp3_ds = gdal.GetDriverByName('PNG').CreateCopy('/vsimem/wmts_21/default_style/tms/GoogleCRS84Quad:2/1/3.png', tmp_ds)
    tmp_ds = gdal.GetDriverByName('MEM').Create('',256,256,4)
    for i in range(4):
        tmp_ds.GetRasterBand(i+1).Fill(128)
    tmp0_ds = gdal.GetDriverByName('PNG').CreateCopy('/vsimem/wmts_21/default_style/tms/GoogleCRS84Quad:2/1/0.png', tmp_ds)
    # Because the extent wraps past the dateline, the left half of the
    # dataset must come from tile column 3 and the right half from column 0.
    if ds.GetRasterBand(1).ReadRaster(0,0,256,256) != tmp3_ds.GetRasterBand(1).ReadRaster(0,0,256,256):
        gdaltest.post_reason('fail')
        return 'fail'
    if ds.GetRasterBand(1).ReadRaster(256,0,256,256) != tmp0_ds.GetRasterBand(1).ReadRaster(0,0,256,256):
        gdaltest.post_reason('fail')
        return 'fail'
    return 'success'
###############################################################################
#
def wmts_CleanCache():
    """Remove every cached tile below /vsimem/cache.

    The WMS/WMTS cache lays tiles out under two levels of single
    hex-digit directories (/vsimem/cache/<h>/<h>/...), so every pair of
    hex digits must be visited.
    """
    # Bug fix: the original string was missing '5', and the loops iterated
    # over range(len(hexstr)) and then formatted the integer indices (0-14)
    # into the path, so the 'a'-'f' (and '5') directories were never cleaned.
    # Iterate over the hex characters themselves instead.
    hexstr = '0123456789abcdef'
    for i in hexstr:
        for j in hexstr:
            lst = gdal.ReadDir('/vsimem/cache/%s/%s' % (i, j))
            if lst is not None:
                for f in lst:
                    gdal.Unlink('/vsimem/cache/%s/%s/%s' % (i, j, f))
###############################################################################
#
def wmts_cleanup():
    """Tear down: reset config options and remove all temporary test files."""
    if gdaltest.wmts_drv is None:
        return 'skip'
    # Undo the configuration set up at the start of the test suite.
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', None)
    gdal.SetConfigOption('GDAL_DEFAULT_WMS_CACHE_PATH', None)
    wmts_CleanCache()
    lst = gdal.ReadDir('/vsimem/')
    if lst:
        for f in lst:
            gdal.Unlink('/vsimem/' + f)
    try:
        shutil.rmtree('tmp/wmts_cache')
    except OSError:
        # The directory may legitimately not exist if no test created it.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        pass
    return 'success'
# Ordered registry of test callables executed by the gdaltest harness;
# wmts_cleanup is kept last so teardown runs after all the tests.
gdaltest_list = [
    wmts_1,
    wmts_2,
    wmts_3,
    wmts_4,
    wmts_5,
    wmts_6,
    wmts_7,
    wmts_8,
    wmts_9,
    wmts_10,
    wmts_11,
    wmts_12,
    wmts_12bis,
    wmts_13,
    wmts_14,
    wmts_15,
    wmts_16,
    wmts_17,
    wmts_18,
    wmts_19,
    wmts_20,
    wmts_21,
    wmts_cleanup ]
if __name__ == '__main__':
    # Standalone entry point: run the whole suite under the gdaltest harness.
    gdaltest.setup_run( 'wmts' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| gpl-2.0 |
ZhangXinNan/tensorflow | tensorflow/contrib/rnn/python/ops/lstm_ops.py | 22 | 24792 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.ops import gen_lstm_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
LayerRNNCell = rnn_cell_impl.LayerRNNCell # pylint: disable=invalid-name
# pylint: disable=invalid-name
def _lstm_block_cell(x,
                     cs_prev,
                     h_prev,
                     w,
                     b,
                     wci=None,
                     wcf=None,
                     wco=None,
                     forget_bias=None,
                     cell_clip=None,
                     use_peephole=None,
                     name=None):
  r"""Run one LSTM time step through the fused LSTMBlockCell kernel.

  A single weight matrix `w` and bias `b` produce the four gate
  pre-activations from the concatenation `[x, h_prev]`; the optional
  diagonal peephole weights `wci`/`wcf`/`wco` connect the cell state to the
  input, forget and output gates.  The kernel implements:

  ```python
  xh = [x, h_prev]
  [i, ci, f, o] = xh * w + b
  f = f + forget_bias
  if not use_peephole:
    wci = wcf = wco = 0
  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = tanh(ci)
  cs = ci .* i + cs_prev .* f
  cs = clip(cs, cell_clip)
  o = sigmoid(cs * wco + o)
  co = tanh(cs)
  h = co .* o
  ```

  Args:
    x: `float32` `Tensor`; cell input of shape (batch_size, num_inputs).
    cs_prev: `Tensor` of the same type as `x`; cell state at the previous
      time step.
    h_prev: `Tensor` of the same type as `x`; output of the cell at the
      previous time step.
    w: `Tensor` of the same type as `x`; the combined weight matrix.
    b: `Tensor` of the same type as `x`; the combined bias vector.
    wci: `Tensor` of the same type as `x`; input-gate peephole weights.
    wcf: `Tensor` of the same type as `x`; forget-gate peephole weights.
    wco: `Tensor` of the same type as `x`; output-gate peephole weights.
    forget_bias: optional `float`; added to the forget gate (defaults to `1`).
    cell_clip: optional `float`; value to clip `cs` to, negative disables
      clipping (defaults to `-1`).
    use_peephole: optional `bool`; whether to apply the peephole weights
      (defaults to `False`).
    name: optional name for the operation.

  Returns:
    A tuple of `Tensor`s `(i, cs, f, o, ci, co, h)` with the same type as
    `x`: input gate, cell state before the tanh, forget gate, output gate,
    cell input, cell after the tanh, and the output h vector.

  Raises:
    ValueError: If the cell size cannot be inferred from `cs_prev`.
  """
  if wci is None:
    # No peephole weights were supplied: the kernel still requires
    # well-shaped tensors, so substitute a shared zero vector.
    cell_size = cs_prev.get_shape().with_rank(2)[1].value
    if cell_size is None:
      raise ValueError("cell_size from `cs_prev` should not be None.")
    zeros = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
    wci = wcf = wco = zeros
  # pylint: disable=protected-access
  return gen_lstm_ops.lstm_block_cell(
      x=x,
      cs_prev=cs_prev,
      h_prev=h_prev,
      w=w,
      b=b,
      wci=wci,
      wcf=wcf,
      wco=wco,
      forget_bias=forget_bias,
      cell_clip=cell_clip if cell_clip is not None else -1,
      use_peephole=use_peephole,
      name=name)
  # pylint: enable=protected-access
def _block_lstm(seq_len_max,
                x,
                w,
                b,
                cs_prev=None,
                h_prev=None,
                wci=None,
                wcf=None,
                wco=None,
                forget_bias=None,
                cell_clip=None,
                use_peephole=None,
                name=None):
  r"""Apply the fused BlockLSTM op to a list of per-time-step inputs.

  Convenience wrapper around `gen_lstm_ops.block_lstm`: stacks the list of
  per-step input tensors, substitutes zero tensors for any state or peephole
  arguments that were not supplied, runs the fused kernel over all time
  steps, and unstacks every output back into per-step lists.

  Args:
    seq_len_max: A `Tensor` of type `int64`; maximum sequence length used.
    x: A list of at least 1 `Tensor` objects of the same type in: `float32`;
      one entry per time step, each of shape (batch_size, input_size).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    b: A `Tensor`. Must have the same type as `x`. The bias vector of length
      4 * cell_size (the four gates are stacked).
    cs_prev: A `Tensor`. Must have the same type as `x`. Initial cell state;
      defaults to zeros.
    h_prev: A `Tensor`. Must have the same type as `x`. Initial cell output;
      defaults to zeros.
    wci: A `Tensor`. Must have the same type as `x`. Input-gate peephole
      weights; defaults to zeros.
    wcf: A `Tensor`. Must have the same type as `x`. Forget-gate peephole
      weights.
    wco: A `Tensor`. Must have the same type as `x`. Output-gate peephole
      weights.
    forget_bias: An optional `float`. Defaults to `1`.
    cell_clip: An optional `float`. Defaults to `-1` (no clipping).
    use_peephole: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A tuple of lists `(i, cs, f, o, ci, co, h)`, each with one `Tensor` per
    time step of the same type as `x`: input gate, cell state before the
    tanh, forget gate, output gate, cell input, cell after the tanh, and
    the output h vector.

  Raises:
    ValueError: If `b` does not have a valid shape.
  """
  batch_size = x[0].get_shape().with_rank(2)[0].value
  cell_size4 = b.get_shape().with_rank(1)[0].value
  if cell_size4 is None:
    raise ValueError("`b` shape must not be None.")
  # `b` stacks the biases of the four gates, so the cell size is a quarter
  # of its length.  Bug fix: use floor division -- this module enables true
  # division via `from __future__ import division`, and `cell_size4 / 4`
  # would produce a float, which is not a valid tensor shape dimension.
  cell_size = cell_size4 // 4
  zero_state = None
  if cs_prev is None or h_prev is None:
    zero_state = array_ops.constant(
        0, dtype=dtypes.float32, shape=[batch_size, cell_size])
  if cs_prev is None:
    cs_prev = zero_state
  if h_prev is None:
    h_prev = zero_state
  if wci is None:
    # NOTE(review): wcf/wco are only defaulted when wci is None; supplying
    # wci without wcf/wco would pass None through to the kernel --
    # presumably callers always supply all three or none.
    wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
    wcf = wci
    wco = wci
  # pylint: disable=protected-access
  i, cs, f, o, ci, co, h = gen_lstm_ops.block_lstm(
      seq_len_max=seq_len_max,
      x=array_ops.stack(x),
      cs_prev=cs_prev,
      h_prev=h_prev,
      w=w,
      wci=wci,
      wcf=wcf,
      wco=wco,
      b=b,
      forget_bias=forget_bias,
      cell_clip=cell_clip if cell_clip is not None else -1,
      name=name,
      use_peephole=use_peephole)
  return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
      f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
          co), array_ops.unstack(h)
  # pylint: enable=protected-access
  # pylint: enable=invalid-name
# Names of the intermediate gradients produced by the fused grad kernel.
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
  """Gradient for LSTMBlockCell.

  The fused grad kernel returns the gradient w.r.t. the previous cell
  state, the concatenated gate pre-activation gradients (`dicfo` =
  d[i, ci, f, o]), and the peephole weight gradients; the remaining input
  gradients (x, h_prev, w, b) are recovered here with dense matmuls.
  """
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = op.inputs
  (i, cs, f, o, ci, co, _) = op.outputs
  # Only the cell-state and output gradients (positions 1 and 6 of the op's
  # outputs) are consumed; the gate outputs get no upstream gradient.
  (_, cs_grad, _, _, _, _, h_grad) = grad
  batch_size = x.get_shape().with_rank(2)[0].value
  if batch_size is None:
    # -1 lets array_ops.slice take the full (dynamic) batch dimension.
    batch_size = -1
  input_size = x.get_shape().with_rank(2)[1].value
  if input_size is None:
    raise ValueError("input_size from `x` should not be None.")
  cell_size = cs_prev.get_shape().with_rank(2)[1].value
  if cell_size is None:
    raise ValueError("cell_size from `cs_prev` should not be None.")
  (cs_prev_grad, dicfo, wci_grad, wcf_grad,
   wco_grad) = gen_lstm_ops.lstm_block_cell_grad(
       x,
       cs_prev,
       h_prev,
       w,
       wci,
       wcf,
       wco,
       b,
       i,
       cs,
       f,
       o,
       ci,
       co,
       cs_grad,
       h_grad,
       use_peephole=op.get_attr("use_peephole"))
  # Backprop from dicfo to xh: the forward pass computed [i, ci, f, o] from
  # xh = [x, h_prev] via xh * w, so d(xh) = dicfo * w^T, then split it back
  # into the x and h_prev parts.
  xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
  x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
  x_grad.get_shape().merge_with(x.get_shape())
  h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
                                (batch_size, cell_size))
  h_prev_grad.get_shape().merge_with(h_prev.get_shape())
  # Backprop from dicfo to w.
  xh = array_ops.concat([x, h_prev], 1)
  w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
  w_grad.get_shape().merge_with(w.get_shape())
  # Backprop from dicfo to b.
  b_grad = nn_ops.bias_add_grad(dicfo)
  b_grad.get_shape().merge_with(b.get_shape())
  # One gradient per op input, in input order.
  return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
          wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
  """Gradient for BlockLSTM.

  Forwards everything to the fused `block_lstm_grad` kernel; only the
  cell-state and output gradients are consumed from upstream.
  """
  seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
  i, cs, f, o, ci, co, h = op.outputs
  # The op's outputs are (i, cs, f, o, ci, co, h); only cs (index 1) and
  # h (index 6) receive upstream gradients.
  cs_grad = grad[1]
  h_grad = grad[6]
  (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
   b_grad) = gen_lstm_ops.block_lstm_grad(
       seq_len_max,
       x,
       cs_prev,
       h_prev,
       w,
       wci,
       wcf,
       wco,
       b,
       i,
       cs,
       f,
       o,
       ci,
       co,
       h,
       cs_grad,
       h_grad,
       use_peephole=op.get_attr("use_peephole"))
  # One entry per op input; seq_len_max is an integer length, not
  # differentiable, hence the leading None.
  return [
      None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
      wco_grad, b_grad
  ]
class LSTMBlockCell(LayerRNNCell):
  """Basic LSTM recurrent network cell.
  The implementation is based on: http://arxiv.org/abs/1409.2329.
  We add `forget_bias` (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.
  Unlike `rnn_cell_impl.LSTMCell`, this is a monolithic op and should be much
  faster. The weight and bias matrices should be compatible as long as the
  variable scope matches.
  """
  def __init__(self,
               num_units,
               forget_bias=1.0,
               cell_clip=None,
               use_peephole=False,
               reuse=None,
               name="lstm_cell"):
    """Initialize the basic LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      cell_clip: An optional `float`. Defaults to `-1` (no clipping).
      use_peephole: Whether to use peephole connections or not.
      reuse: (optional) boolean describing whether to reuse variables in an
        existing scope. If not `True`, and the existing scope already has the
        given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases. By default this is "lstm_cell", for variable-name compatibility
        with `tf.nn.rnn_cell.LSTMCell`.
        When restoring from CudnnLSTM-trained checkpoints, must use
        CudnnCompatibleLSTMBlockCell instead.
    """
    super(LSTMBlockCell, self).__init__(_reuse=reuse, name=name)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._use_peephole = use_peephole
    # The kernel expects -1 to mean "no clipping".
    self._cell_clip = cell_clip if cell_clip is not None else -1
    # Mapping from internal short names to the saved-variable names used in
    # `build`, chosen for checkpoint compatibility with the canonical cell.
    self._names = {
        "W": "kernel",
        "b": "bias",
        "wci": "w_i_diag",
        "wcf": "w_f_diag",
        "wco": "w_o_diag",
        "scope": "lstm_cell"
    }
    # Inputs must be 2-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=2)
  @property
  def state_size(self):
    # State is an (c, h) pair, each of size num_units.
    return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
  @property
  def output_size(self):
    return self._num_units
  def build(self, inputs_shape):
    """Create the kernel, bias and (optionally) peephole variables."""
    if not inputs_shape[1].value:
      raise ValueError(
          "Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
    input_size = inputs_shape[1].value
    # A single fused weight matrix / bias vector covers all four gates.
    self._kernel = self.add_variable(
        self._names["W"], [input_size + self._num_units, self._num_units * 4])
    self._bias = self.add_variable(
        self._names["b"], [self._num_units * 4],
        initializer=init_ops.constant_initializer(0.0))
    if self._use_peephole:
      self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
      self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
      self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])
    self.built = True
  def call(self, inputs, state):
    """Long short-term memory cell (LSTM)."""
    if len(state) != 2:
      raise ValueError("Expecting state to be a tuple with length 2.")
    if self._use_peephole:
      wci = self._w_i_diag
      wcf = self._w_f_diag
      wco = self._w_o_diag
    else:
      # The fused kernel still expects peephole tensors; feed zeros.
      wci = wcf = wco = array_ops.zeros([self._num_units])
    (cs_prev, h_prev) = state
    # Only the new cell state and output are needed; the per-gate outputs
    # are discarded.
    (_, cs, _, _, _, _, h) = _lstm_block_cell(
        inputs,
        cs_prev,
        h_prev,
        self._kernel,
        self._bias,
        wci=wci,
        wcf=wcf,
        wco=wco,
        forget_bias=self._forget_bias,
        cell_clip=self._cell_clip,
        use_peephole=self._use_peephole)
    new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
    return h, new_state
class LSTMBlockWrapper(base_layer.Layer):
  """This is a helper class that provides housekeeping for LSTM cells.
  This may be useful for alternative LSTM and similar type of cells.
  The subclasses must implement `_call_cell` method and `num_units` property.
  This base class handles input list/tensor conversion, default zero state
  creation, sequence-length masking of the outputs, and extraction of the
  final valid state per batch element.
  """
  @abc.abstractproperty
  def num_units(self):
    """Number of units in this cell (output dimension)."""
    pass
  @abc.abstractmethod
  def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
                 sequence_length):
    """Run this LSTM on inputs, starting from the given state.
    This method must be implemented by subclasses and does the actual work
    of calling the cell.
    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
      initial_cell_state: initial value for cell state, shape `[batch_size,
        self._num_units]`
      initial_output: initial value of cell output, shape `[batch_size,
        self._num_units]`
      dtype: The data type for the initial state and expected output.
      sequence_length: Specifies the length of each sequence in inputs. An int32
        or int64 vector (tensor) size [batch_size], values in [0, time_len) or
        None.
    Returns:
      A pair containing:
      - State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
      - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
    """
    pass
  def call(self, inputs, initial_state=None, dtype=None, sequence_length=None):
    """Run this LSTM on inputs, starting from the given state.
    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
      initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
        of shape `[batch_size, self._num_units]`. If this is not provided, the
        cell is expected to create a zero initial state of type `dtype`.
      dtype: The data type for the initial state and expected output. Required
        if `initial_state` is not provided or RNN state has a heterogeneous
        dtype.
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len).`
        Defaults to `time_len` for each element.
    Returns:
      A pair containing:
      - Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
        or a list of time_len tensors of shape `[batch_size, output_size]`,
        to match the type of the `inputs`.
      - Final state: a tuple `(cell_state, output)` matching `initial_state`.
    Raises:
      ValueError: in case of shape mismatches
    """
    # Accept either a list of per-step tensors or a single stacked tensor;
    # remember which so the output can mirror the input type.
    is_list = isinstance(inputs, list)
    if is_list:
      inputs = array_ops.stack(inputs)
    inputs_shape = inputs.get_shape().with_rank(3)
    if not inputs_shape[2]:
      raise ValueError("Expecting inputs_shape[2] to be set: %s" % inputs_shape)
    batch_size = inputs_shape[1].value
    if batch_size is None:
      batch_size = array_ops.shape(inputs)[1]
    time_len = inputs_shape[0].value
    if time_len is None:
      time_len = array_ops.shape(inputs)[0]
    # Provide default values for initial_state and dtype
    if initial_state is None:
      if dtype is None:
        raise ValueError("Either initial_state or dtype needs to be specified")
      z = array_ops.zeros(
          array_ops.stack([batch_size, self.num_units]), dtype=dtype)
      initial_state = z, z
    else:
      if len(initial_state) != 2:
        raise ValueError(
            "Expecting initial_state to be a tuple with length 2 or None")
      if dtype is None:
        dtype = initial_state[0].dtype
    # create the actual cell
    if sequence_length is not None:
      sequence_length = ops.convert_to_tensor(sequence_length)
    initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
    cell_states, outputs = self._call_cell(
        inputs, initial_cell_state, initial_output, dtype, sequence_length)
    if sequence_length is not None:
      # Mask out the part beyond sequence_length: zero every output step at
      # or past each sequence's length.
      mask = array_ops.transpose(
          array_ops.sequence_mask(sequence_length, time_len, dtype=dtype),
          [1, 0])
      mask = array_ops.tile(
          array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
      outputs *= mask
      # Prepend initial states to cell_states and outputs for indexing to work
      # correctly, since we want to access the last valid state at
      # sequence_length - 1, which can even be -1, corresponding to the
      # initial state.
      mod_cell_states = array_ops.concat(
          [array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
      mod_outputs = array_ops.concat(
          [array_ops.expand_dims(initial_output, [0]), outputs], 0)
      final_cell_state = self._gather_states(mod_cell_states, sequence_length,
                                             batch_size)
      final_output = self._gather_states(mod_outputs, sequence_length,
                                         batch_size)
    else:
      # No sequence_lengths used: final state is the last state
      final_cell_state = cell_states[-1]
      final_output = outputs[-1]
    if is_list:
      # Input was a list, so return a list
      outputs = array_ops.unstack(outputs)
    final_state = rnn_cell_impl.LSTMStateTuple(final_cell_state, final_output)
    return outputs, final_state
  def _gather_states(self, data, indices, batch_size):
    """Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
    # Per-batch-element lookup along the time axis via gather_nd.
    return array_ops.gather_nd(
        data, array_ops.stack([indices, math_ops.range(batch_size)], axis=1))
class LSTMBlockFusedCell(LSTMBlockWrapper):
  """FusedRNNCell implementation of LSTM.
  This is an extremely efficient LSTM implementation, that uses a single TF op
  for the entire LSTM. It should be both faster and more memory-efficient than
  LSTMBlockCell defined above.
  The implementation is based on: http://arxiv.org/abs/1409.2329.
  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.
  The variable naming is consistent with `rnn_cell_impl.LSTMCell`.
  """
  def __init__(self,
               num_units,
               forget_bias=1.0,
               cell_clip=None,
               use_peephole=False,
               reuse=None,
               name="lstm_fused_cell"):
    """Initialize the LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      cell_clip: clip the cell to this value. Default is no cell clipping.
      use_peephole: Whether to use peephole connections or not.
      reuse: (optional) boolean describing whether to reuse variables in an
        existing scope. If not `True`, and the existing scope already has the
        given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases. By default this is "lstm_cell", for variable-name compatibility
        with `tf.nn.rnn_cell.LSTMCell`.
    """
    super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)
    self._num_units = num_units
    self._forget_bias = forget_bias
    # The kernel expects -1 to mean "no clipping".
    self._cell_clip = cell_clip if cell_clip is not None else -1
    self._use_peephole = use_peephole
    # Inputs must be 3-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=3)
  @property
  def num_units(self):
    """Number of units in this cell (output dimension)."""
    return self._num_units
  def build(self, input_shape):
    """Create the fused kernel, bias and (optionally) peephole variables."""
    input_size = input_shape[2].value
    # One fused weight matrix / bias vector covers all four gates.
    self._kernel = self.add_variable(
        "kernel", [input_size + self._num_units, self._num_units * 4])
    self._bias = self.add_variable(
        "bias", [self._num_units * 4],
        initializer=init_ops.constant_initializer(0.0))
    if self._use_peephole:
      self._w_i_diag = self.add_variable("w_i_diag", [self._num_units])
      self._w_f_diag = self.add_variable("w_f_diag", [self._num_units])
      self._w_o_diag = self.add_variable("w_o_diag", [self._num_units])
    self.built = True
  def _call_cell(self,
                 inputs,
                 initial_cell_state=None,
                 initial_output=None,
                 dtype=None,
                 sequence_length=None):
    """Run this LSTM on inputs, starting from the given state.
    Args:
      inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
      initial_cell_state: initial value for cell state, shape `[batch_size,
        self._num_units]`
      initial_output: initial value of cell output, shape `[batch_size,
        self._num_units]`
      dtype: The data type for the initial state and expected output.
      sequence_length: Specifies the length of each sequence in inputs. An
        `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
        time_len)` or None.
    Returns:
      A pair containing:
      - Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
        output_size]`
      - Output (h): A `3-D` tensor of shape `[time_len, batch_size,
        output_size]`
    """
    inputs_shape = inputs.get_shape().with_rank(3)
    time_len = inputs_shape[0].value
    if time_len is None:
      time_len = array_ops.shape(inputs)[0]
    if self._use_peephole:
      wci = self._w_i_diag
      wco = self._w_o_diag
      wcf = self._w_f_diag
    else:
      # The fused kernel still expects peephole tensors; feed zeros.
      wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
    # The kernel only computes up to the longest sequence actually present.
    if sequence_length is None:
      max_seq_len = math_ops.to_int64(time_len)
    else:
      max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
    # Only the cell-state and output tracks are needed; the per-gate
    # outputs are discarded.
    _, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
        seq_len_max=max_seq_len,
        x=inputs,
        cs_prev=initial_cell_state,
        h_prev=initial_output,
        w=self._kernel,
        wci=wci,
        wcf=wcf,
        wco=wco,
        b=self._bias,
        forget_bias=self._forget_bias,
        cell_clip=self._cell_clip,
        use_peephole=self._use_peephole)
    return cs, h
| apache-2.0 |
jinankjain/zamboni | mkt/reviewers/tests/test_forms.py | 4 | 1746 | from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from mock import patch
from nose.tools import eq_
import amo
import amo.tests
from mkt.comm.forms import CommAttachmentForm
@patch.object(settings, 'MAX_REVIEW_ATTACHMENT_UPLOAD_SIZE', 1024)
class TestReviewAppAttachmentForm(amo.tests.TestCase):
    """Validation tests for CommAttachmentForm around the upload size limit."""

    def setUp(self):
        # Patched to 1024 by the class decorator above.
        self.max_size = settings.MAX_REVIEW_ATTACHMENT_UPLOAD_SIZE

    def post_data(self, **kwargs):
        """Baseline POST payload, overridable via keyword arguments."""
        return dict({'description': 'My Test File'}, **kwargs)

    def file_data(self, size=1024):
        """File payload carrying `size` bytes, or no attachment when falsy."""
        upload = SimpleUploadedFile('bacon.txt', ' ' * size) if size else None
        return {'attachment': upload}

    def test_no_attachment(self):
        self.check_valid(False, file_data=self.file_data(size=0))

    def test_no_description(self):
        self.check_valid(True, post_data=self.post_data(description=None))

    def test_attachment_okay(self):
        self.check_valid(True, file_data=self.file_data(size=self.max_size))

    def test_attachment_too_large(self):
        self.check_valid(False, file_data=self.file_data(size=self.max_size + 1))

    def check_valid(self, valid, post_data=None, file_data=None):
        """Bind CommAttachmentForm and assert its validity equals `valid`."""
        post_data = post_data or self.post_data()
        file_data = file_data or self.file_data()
        form = CommAttachmentForm(post_data, file_data)
        eq_(form.is_valid(), valid)
| bsd-3-clause |
HumanExposure/factotum | dashboard/views/functional_use_curation.py | 1 | 5589 | from dashboard.models.functional_use import FunctionalUseToRawChem
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, reverse, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.db.models import Count, F
from dashboard.models import FunctionalUse, FunctionalUseCategory
from django.http import JsonResponse
import json
from django.db.models import Count, F, Exists, Case, When, Q, Value
from django.views.generic import TemplateView
from django_datatables_view.base_datatable_view import BaseDatatableView
from dashboard.models import FunctionalUse, RawChem
@login_required()
def functional_use_cleanup(request):
    """Delete orphaned reported functional uses, then redirect.

    On POST, removes every reported functional use that no chemical
    references and flashes a success/warning message; GET requests simply
    redirect back to the curation page.
    """
    if request.method == "POST":
        orphans = FunctionalUse.objects.annotate(
            chem_count=Count("chemicals")
        ).filter(chem_count=0)
        removed_count, _ = orphans.delete()
        if removed_count:
            messages.success(
                request,
                f"{removed_count} reported functional use(s) successfully removed",
            )
        else:
            messages.warning(request, "No reported functional use removed")
    return redirect("functional_use_curation")
@login_required()
def functional_use_curation(request):
    """Render the functional-use curation grid, or apply one category change.

    GET: list every (reported functional use, harmonized category)
    combination with chemical counts, plus the selectable categories.
    POST: expects a JSON payload with keys "pk", "category" and
    "newcategory"; when the category actually changed, reassigns the
    functional use and returns a JSON status response.
    """
    template_name = "functional_use_curation/functional_use_curation.html"
    if request.method == "POST":
        cat = json.loads(request.POST.get("json") or "{}")
        # Default (empty) response is returned unchanged when the payload is
        # incomplete or the category did not change.
        response_data = {"result": "", "message": ""}
        if (
            # dict-keys >= set tests that all three keys are present.
            cat.keys() >= {"pk", "category", "newcategory"}
            and cat["category"] != cat["newcategory"]
        ):
            fu = FunctionalUse.objects.get(pk=cat["pk"])
            fu.category_id = cat["newcategory"]
            fu.save()
            response_data["result"] = "success"
            response_data["message"] = "Harmonized Category Updated"
        return JsonResponse(response_data)
    combinations = (
        FunctionalUse.objects.values(
            "pk",
            "report_funcuse",
            "category",
            # `newcategory` starts equal to `category`; the page edits it.
            newcategory=F("category"),
            categorytitle=F("category__title"),
        )
        .annotate(fu_count=Count("chemicals"))
        .order_by("report_funcuse", "category__title")
    )
    categories = FunctionalUseCategory.objects.values("id", "title").order_by("title")
    # Leading blank entry represents "no category" in the dropdown.
    categorylist = [{"id": "", "title": ""}] + list(categories)
    return render(
        request,
        template_name,
        {"combinations": list(combinations), "categories": categorylist},
    )
class FunctionalUseCurationChemicals(LoginRequiredMixin, TemplateView):
    """Page listing the chemicals linked to one reported functional use.

    The table body is loaded via AJAX from
    FunctionalUseCurationChemicalsTable; this view only supplies the
    functional use object and the datatable configuration.
    """

    template_name = "functional_use_curation/functional_use_curation_chemicals.html"
    # Base datatable configuration; the per-request AJAX endpoint is merged
    # in by get_context_data.
    table_settings = {"pagination": True, "pageLength": 50}

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["functional_use"] = get_object_or_404(
            FunctionalUse, pk=self.kwargs.get("functional_use_pk")
        )
        # Build a per-request copy rather than mutating the class-level
        # dict, which is shared by every instance (and thread) of the view.
        context["table_settings"] = {
            **self.table_settings,
            "ajax": reverse(
                "functional_use_curation_chemicals_table", kwargs=self.kwargs
            ),
        }
        return context
class FunctionalUseCurationChemicalsTable(BaseDatatableView):
    """AJAX datatable backend: chemicals linked to one reported functional use."""

    model = RawChem

    def get_filter_method(self):
        """Use case-insensitive substring matching for column filters."""
        return self.FILTER_ICONTAINS

    def render_column(self, row, column):
        """Render virtual/related columns; defer the rest to the base class."""
        if column == "preferred_name":
            # Link the display name back to its source data document.
            return f"""
            <a href='{row.extracted_text.data_document.get_absolute_url()}'>
                {row.preferred_name}
            </a>"""
        if column == "functional_uses":
            return ", ".join(
                row.functional_uses.values_list("report_funcuse", flat=True)
            )
        if column == "extracted_text__data_document__title":
            return row.extracted_text.data_document.title
        return super().render_column(row, column)

    def get_initial_queryset(self):
        """Chemicals for the requested functional use, with a display name.

        `preferred_name` falls back through: curated DSSTox true name,
        then the raw chemical name, then a placeholder.
        """
        functional_use = get_object_or_404(
            FunctionalUse, pk=self.kwargs.get("functional_use_pk")
        )
        qs = super().get_initial_queryset()
        qs = (
            qs.filter(functional_uses=functional_use)
            .annotate(
                preferred_name=Case(
                    # curated: prefer the DSSTox true chemical name
                    When(dsstox__isnull=False, then=F("dsstox__true_chemname")),
                    # otherwise use the raw name when it is not blank
                    When(~Q(raw_chem_name=""), then=F("raw_chem_name")),
                    # no curated name and no raw name
                    default=Value("Unnamed Chemical"),
                )
            )
            .order_by("pk")
        )
        return qs
@login_required()
def unassign_functional_uses(request, functional_use_pk):
    """
    Delete the records in the FunctionalUseToRawChem join table
    where the functional use is the one identified in the view's argument.

    GET renders a confirmation page; POST performs the unassignment and
    redirects back to the curation grid.  The FunctionalUse row itself is
    kept -- only its links to chemicals are removed.
    """
    # 404 early if the functional use does not exist.
    rfu = get_object_or_404(FunctionalUse, pk=functional_use_pk)
    functionalusetorawchems = FunctionalUseToRawChem.objects.filter(
        functional_use_id=functional_use_pk
    )
    if request.method == "POST":
        functionalusetorawchems.delete()
        return redirect("functional_use_curation")
    return render(
        request,
        "functional_use_curation/confirm_functional_use_removal.html",
        {"object": rfu},
    )
| gpl-3.0 |
hr87/advcubit | tests/block.py | 2 | 1864 | """ block module testing
"""
import unittest
import advcubit.system_module as _system
import advcubit.utility_module as _utility
import advcubit.block_module as _block
class BlockTest(unittest.TestCase):
    """Exercises block creation, element typing and naming in advcubit."""

    def setUp(self):
        """Start Cubit and open a fresh model before each test."""
        _utility.startCubit()
        _utility.newFile()

    def tearDown(self):
        """Shut Cubit down after each test."""
        _utility.closeCubit()

    def test_create_block(self):
        """createBlock on a meshed brick must not raise."""
        v = _system.cubitWrapper.brick(1, 1, 1)
        v.volumes()[0].mesh()
        try:
            _block.createBlock(v, 33)
        except _system.AdvCubitException as e:
            # self.fail() is the idiomatic unittest way to report an
            # unexpected exception (instead of assertTrue(False, msg)).
            self.fail(str(e))

    def test_element_type(self):
        """setElementType on an existing block must not raise."""
        v = _system.cubitWrapper.brick(1, 1, 1)
        v.volumes()[0].mesh()
        _block.createBlock(v, 33)
        try:
            _block.setElementType(33, _block.VolumeElementTypes.HEX8)
        except _system.AdvCubitException as e:
            self.fail(str(e))

    def test_name_block(self):
        """nameBlock on an existing block must not raise."""
        v = _system.cubitWrapper.brick(1, 1, 1)
        v.volumes()[0].mesh()
        _block.createBlock(v, 33)
        try:
            _block.nameBlock(33, 'testName')
        except _system.AdvCubitException as e:
            self.fail(str(e))

    def test_block_element(self):
        """createBlockFromElements on a meshed volume must not raise."""
        v = _system.cubitWrapper.brick(1, 1, 1)
        v.volumes()[0].mesh()
        try:
            _block.createBlockFromElements(33, 'hex', v.volumes()[0])
        except _system.AdvCubitException as e:
            self.fail(str(e))
def testSuite():
    """Build the block-module suite; tests run in the listed order."""
    blockSuite = unittest.TestSuite()
    blockSuite.addTest(BlockTest('test_create_block'))
    blockSuite.addTest(BlockTest('test_element_type'))
    blockSuite.addTest(BlockTest('test_name_block'))
    blockSuite.addTest(BlockTest('test_block_element'))
return blockSuite | lgpl-2.1 |
guschmue/tensorflow | tensorflow/python/ops/nn_fused_batchnorm_test.py | 4 | 21424 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
  # Compares nn_impl.fused_batch_norm against straightforward reference
  # implementations, for inference/training, NHWC/NCHW and float16/float32.

  def _batch_norm(self, x, mean, var, offset, scale, epsilon):
    # We compute the batch norm manually in this function because
    # nn_impl.batch_normalization does not support float16 yet.
    # TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
    inv = math_ops.rsqrt(var + epsilon) * scale
    # Arithmetic runs in the scale dtype (float32) for precision, then the
    # result is cast back to the input dtype.
    y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
    return math_ops.cast(y, x.dtype)

  def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
    """Reference inference-mode batch norm; returns the evaluated array."""
    if data_format not in ['NHWC', 'NCHW']:
      raise ValueError('data_format must be NCHW or NHWC, '
                       'got %s.' % data_format)
    # Normalization is done in NHWC; transpose in/out for NCHW inputs.
    if data_format == 'NCHW':
      x = array_ops.transpose(x, [0, 2, 3, 1])
    y = self._batch_norm(x, mean, var, offset, scale, epsilon)
    if data_format == 'NCHW':
      y = array_ops.transpose(y, [0, 3, 1, 2])
    return y.eval()
  def _test_inference(self,
                      x_shape,
                      x_dtype,
                      scale_shape,
                      scale_dtype,
                      use_gpu=True,
                      data_format='NHWC'):
    """Checks fused_batch_norm inference output against _inference_ref."""
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    var_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.test_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      mean = constant_op.constant(mean_val, name='mean')
      var = constant_op.constant(var_val, name='variance')
      epsilon = 0.001
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=mean,
          variance=var,
          epsilon=epsilon,
          data_format=data_format,
          is_training=False)
      y_val = sess.run(y)
      # The reference calls .eval() internally, so it must run inside the
      # session context.
      y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
                                  data_format)
    # An atol value of 1e-3 is too small for float16's, because some adjacent
    # float16 values that y_val can take are greater than 1e-3 apart, e.g.
    # 2.16602 and 2.16797.
    atol = 2e-3 if x_dtype == np.float16 else 1e-3
    self.assertAllClose(y_ref, y_val, atol=atol)
  def _training_ref(self, x, scale, offset, epsilon, data_format):
    """Reference training-mode batch norm; returns (y, batch_mean, batch_var)."""
    if data_format not in ['NHWC', 'NCHW']:
      raise ValueError('data_format must be NCHW or NHWC, '
                       'got %s.' % data_format)
    if data_format == 'NCHW':
      x = array_ops.transpose(x, [0, 2, 3, 1])
    # Per-channel batch statistics over the N, H, W axes.
    mean, var = nn_impl.moments(
        math_ops.cast(x, scale.dtype), [0, 1, 2], keep_dims=False)
    y = self._batch_norm(x, mean, var, offset, scale, epsilon)
    if data_format == 'NCHW':
      y = array_ops.transpose(y, [0, 3, 1, 2])
    return y.eval(), mean.eval(), var.eval()

  def _test_training(self,
                     x_shape,
                     x_dtype,
                     scale_shape,
                     scale_dtype,
                     use_gpu=True,
                     data_format='NHWC'):
    """Checks fused_batch_norm training outputs (y, mean, var) vs reference."""
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.test_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      epsilon = 0.001
      y, mean, var = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      y_val, mean_val, var_val = sess.run([y, mean, var])
      y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
                                                    data_format)
    y_atol = 2e-3 if x_dtype == np.float16 else 1e-3
    self.assertAllClose(y_ref, y_val, atol=y_atol)
    self.assertAllClose(mean_ref, mean_val, atol=1e-3)
    # This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
    # the denominator in the formula to calculate variance, while
    # tf.nn.fused_batch_norm has Bessel's correction built in.
    sample_size = x_val.size / scale_val.size
    var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
    self.assertAllClose(var_ref, var_val, atol=1e-3)
  def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape):
    """Computes the gradient error for float16 inputs and/or outputs.

    This returns the same value as gradient_checker.compute_gradient_error. The
    difference is that gradient_checker.compute_gradient_error does not
    numerically compute the gradients in a numerically stable way for float16
    tensors. To fix this, this function requires float32 versions of x and y to
    numerically compute the gradients, to compare with the float16 symbolically
    computed gradients.

    Args:
      x: The input tensor.
      x32: A float32 version of x.
      x_shape: The shape of x.
      y: The output tensor.
      y32: A float32 version of y. Must be calculated based on x32, not x.
      y_shape: The shape of y.

    Returns:
      The maximum error in between the two Jacobians, as in
      gradient_checker.compute_gradient_error.
    """
    x_init_val = np.random.random_sample(x_shape).astype(np.float16)
    # Both graphs start from bit-identical values so the Jacobians are
    # comparable; the float32 copy only improves numerical differentiation.
    x32_init_val = x_init_val.astype(np.float32)

    # TODO(reedwm): Do not perform the unnecessary computations in
    # compute_gradient, since they double the computation time of this function.
    theoretical_grad, _ = gradient_checker.compute_gradient(
        x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)
    _, numerical_grad = gradient_checker.compute_gradient(
        x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)
    return np.fabs(theoretical_grad - numerical_grad).max()
  def _test_gradient(self,
                     x_shape,
                     x_dtype,
                     scale_shape,
                     scale_dtype,
                     use_gpu=True,
                     data_format='NHWC',
                     is_training=True):
    """Gradient-checks fused_batch_norm w.r.t. x, scale and offset.

    For float16 inputs the numerical gradient is computed on a float32
    shadow graph (see _compute_gradient_error_float16).
    """
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.test_session(use_gpu=use_gpu):
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      # Training mode computes batch statistics; inference mode needs
      # explicit population statistics.
      if is_training:
        pop_mean = None
        pop_var = None
      else:
        pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
        pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      if x_dtype != np.float16:
        err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
        err_scale = gradient_checker.compute_gradient_error(
            scale, scale_shape, y, x_shape)
        err_offset = gradient_checker.compute_gradient_error(
            offset, scale_shape, y, x_shape)
      else:
        # float32 shadow of the same computation, built from the same values.
        x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)
        y32, _, _ = nn_impl.fused_batch_norm(
            x32,
            scale,
            offset,
            mean=pop_mean,
            variance=pop_var,
            data_format=data_format,
            is_training=is_training)
        err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,
                                                     x_shape)
        err_scale = self._compute_gradient_error_float16(
            scale, scale, scale_shape, y, y32, x_shape)
        err_offset = self._compute_gradient_error_float16(
            offset, offset, scale_shape, y, y32, x_shape)

    # float16 inputs get a looser tolerance; scale/offset are float32.
    x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3
    scale_err_tolerance = 1e-3
    self.assertLess(err_x, x_err_tolerance)
    self.assertLess(err_scale, scale_err_tolerance)
    self.assertLess(err_offset, scale_err_tolerance)
  def _test_grad_grad(self,
                      x_shape,
                      x_dtype,
                      scale_shape,
                      scale_dtype,
                      use_gpu=True,
                      data_format='NHWC',
                      is_training=True,
                      err_tolerance=1e-3):
    """Checks second-order gradients of fused_batch_norm.

    Builds grad_x/grad_scale/grad_offset symbolically, compares them against
    nn_grad._BatchNormGrad (training mode only), then gradient-checks those
    gradients with respect to grad_y and x.
    """
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    grad_y_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.test_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      grad_y = constant_op.constant(grad_y_val, name='grad_y')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      if is_training:
        pop_mean = None
        pop_var = None
      else:
        pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
        pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      # First-order gradients of y w.r.t. input, scale and offset.
      grad_x, grad_scale, grad_offset = gradients_impl.gradients(
          y, [x, scale, offset], grad_y)
      if is_training:
        epsilon = y.op.get_attr('epsilon')
        data_format = y.op.get_attr('data_format')
        grad_vals = sess.run([grad_x, grad_scale, grad_offset])
        # Cross-check against the internal Python gradient implementation.
        grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format)
        grad_internal_vals = sess.run(list(grad_internal))
        for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
          self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)

      if x_dtype != np.float16:
        err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
            grad_y, x_shape, grad_x, x_shape)
        err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
            grad_y, x_shape, grad_scale, scale_shape)
        err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
            grad_y, x_shape, grad_offset, scale_shape)
        # In freeze mode, grad_x is not a function of x.
        if is_training:
          err_grad_x_1 = gradient_checker.compute_gradient_error(
              x, x_shape, grad_x, x_shape)
          err_grad_x_2 = gradient_checker.compute_gradient_error(
              x, x_shape, grad_scale, scale_shape)
        err_grad_scale = gradient_checker.compute_gradient_error(
            scale, scale_shape, grad_x, x_shape)
      else:
        # float32 shadow graph for numerically stable float16 checking.
        x32 = constant_op.constant(x_val, dtype=dtypes.float32, name='x32')
        grad_y32 = constant_op.constant(
            grad_y_val, dtype=dtypes.float32, name='grad_y32')
        y32, _, _ = nn_impl.fused_batch_norm(
            x32,
            scale,
            offset,
            mean=pop_mean,
            variance=pop_var,
            data_format=data_format,
            is_training=is_training)
        grad_x32, grad_scale32, grad_offset32 = gradients_impl.gradients(
            y32, [x32, scale, offset], grad_y32)
        err_grad_grad_y_1 = self._compute_gradient_error_float16(
            grad_y, grad_y32, x_shape, grad_x, grad_x32, x_shape)
        err_grad_grad_y_2 = self._compute_gradient_error_float16(
            grad_y, grad_y32, x_shape, grad_scale, grad_scale32, scale_shape)
        err_grad_grad_y_3 = self._compute_gradient_error_float16(
            grad_y, grad_y32, x_shape, grad_offset, grad_offset32, scale_shape)
        # In freeze mode, grad_x is not a function of x.
        if is_training:
          err_grad_x_1 = self._compute_gradient_error_float16(
              x, x32, x_shape, grad_x, grad_x32, x_shape)
          err_grad_x_2 = self._compute_gradient_error_float16(
              x, x32, x_shape, grad_scale, grad_scale32, scale_shape)
        err_grad_scale = self._compute_gradient_error_float16(
            scale, scale, scale_shape, grad_x, grad_x32, x_shape)

    self.assertLess(err_grad_grad_y_1, err_tolerance)
    self.assertLess(err_grad_grad_y_2, err_tolerance)
    self.assertLess(err_grad_grad_y_3, err_tolerance)
    if is_training:
      self.assertLess(err_grad_x_1, err_tolerance)
      self.assertLess(err_grad_x_2, err_tolerance)
    self.assertLess(err_grad_scale, err_tolerance)
  def testInference(self):
    """Covers inference over several shapes, dtypes, formats and devices.

    GPU cases run only when a CUDA device is available; CPU NHWC cases
    always run.
    """
    x_shape = [1, 1, 6, 1]
    for dtype in [np.float16, np.float32]:
      if test.is_gpu_available(cuda_only=True):
        self._test_inference(
            x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
        self._test_inference(
            x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
      self._test_inference(
          x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')

    x_shape = [1, 1, 6, 2]
    if test.is_gpu_available(cuda_only=True):
      for dtype in [np.float16, np.float32]:
        self._test_inference(
            x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
        self._test_inference(
            x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')

    # NCHW layout is GPU-only here.
    x_shape = [1, 2, 1, 6]
    if test.is_gpu_available(cuda_only=True):
      for dtype in [np.float16, np.float32]:
        self._test_inference(
            x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')

    x_shape = [27, 131, 127, 6]
    for dtype in [np.float16, np.float32]:
      if test.is_gpu_available(cuda_only=True):
        self._test_inference(
            x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
        self._test_inference(
            x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
      self._test_inference(
          x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')

  def testTraining(self):
    """Covers training mode over the same shape/dtype/format matrix."""
    x_shape = [1, 1, 6, 1]
    for dtype in [np.float16, np.float32]:
      if test.is_gpu_available(cuda_only=True):
        self._test_training(
            x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC')
        self._test_training(
            x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW')
      self._test_training(
          x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC')

    x_shape = [1, 1, 6, 2]
    for dtype in [np.float16, np.float32]:
      if test.is_gpu_available(cuda_only=True):
        self._test_training(
            x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC')
        self._test_training(
            x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC')

    # NCHW layout is GPU-only here.
    x_shape = [1, 2, 1, 6]
    if test.is_gpu_available(cuda_only=True):
      for dtype in [np.float16, np.float32]:
        self._test_training(
            x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW')

    x_shape = [27, 131, 127, 6]
    for dtype in [np.float16, np.float32]:
      if test.is_gpu_available(cuda_only=True):
        self._test_training(
            x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW')
        self._test_training(
            x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC')
      self._test_training(
          x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC')
  def testBatchNormGrad(self):
    """First-order gradient checks, in both training and freeze mode."""
    for is_training in [True, False]:
      x_shape = [1, 1, 6, 1]
      for dtype in [np.float16, np.float32]:
        if test.is_gpu_available(cuda_only=True):
          self._test_gradient(
              x_shape,
              dtype, [1],
              np.float32,
              use_gpu=True,
              data_format='NHWC',
              is_training=is_training)
          self._test_gradient(
              x_shape,
              dtype, [1],
              np.float32,
              use_gpu=True,
              data_format='NCHW',
              is_training=is_training)
        self._test_gradient(
            x_shape,
            dtype, [1],
            np.float32,
            use_gpu=False,
            data_format='NHWC',
            is_training=is_training)

      x_shape = [1, 1, 6, 2]
      for dtype in [np.float16, np.float32]:
        if test.is_gpu_available(cuda_only=True):
          self._test_gradient(
              x_shape,
              dtype, [2],
              np.float32,
              use_gpu=True,
              data_format='NHWC',
              is_training=is_training)
          self._test_gradient(
              x_shape,
              dtype, [2],
              np.float32,
              use_gpu=False,
              data_format='NHWC',
              is_training=is_training)

      # NCHW layout is GPU-only here.
      x_shape = [1, 2, 1, 6]
      if test.is_gpu_available(cuda_only=True):
        for dtype in [np.float16, np.float32]:
          self._test_gradient(
              x_shape,
              dtype, [2],
              np.float32,
              use_gpu=True,
              data_format='NCHW',
              is_training=is_training)

      x_shape = [5, 7, 11, 4]
      for dtype in [np.float16, np.float32]:
        if test.is_gpu_available(cuda_only=True):
          self._test_gradient(
              x_shape,
              dtype, [7],
              np.float32,
              use_gpu=True,
              data_format='NCHW',
              is_training=is_training)
          self._test_gradient(
              x_shape,
              dtype, [4],
              np.float32,
              use_gpu=True,
              data_format='NHWC',
              is_training=is_training)
        self._test_gradient(
            x_shape,
            dtype, [4],
            np.float32,
            use_gpu=False,
            data_format='NHWC',
            is_training=is_training)
  def _testBatchNormGradGrad(self, config):
    """Runs _test_grad_grad for one config dict on CPU and (if present) GPU.

    config keys: 'shape' (NHWC), 'err_tolerance', 'dtype'. For NCHW the
    channel count is shape[1]; for NHWC it is shape[3].
    """
    shape = config['shape']
    err_tolerance = config['err_tolerance']
    dtype = config['dtype']
    for is_training in [True, False]:
      if test.is_gpu_available(cuda_only=True):
        self._test_grad_grad(
            shape,
            dtype, [shape[3]],
            np.float32,
            use_gpu=True,
            data_format='NHWC',
            is_training=is_training,
            err_tolerance=err_tolerance)
        self._test_grad_grad(
            shape,
            dtype, [shape[1]],
            np.float32,
            use_gpu=True,
            data_format='NCHW',
            is_training=is_training,
            err_tolerance=err_tolerance)
      self._test_grad_grad(
          shape,
          dtype, [shape[3]],
          np.float32,
          use_gpu=False,
          data_format='NHWC',
          is_training=is_training,
          err_tolerance=err_tolerance)
def testBatchNormGradGrad(self):
configs = [{
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2,
'dtype': np.float32,
}, {
'shape': [2, 3, 2, 2],
'err_tolerance': 1e-3,
'dtype': np.float32,
}, {
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2,
'dtype': np.float16,
}, {
'shape': [2, 3, 2, 2],
'err_tolerance': 2e-3,
'dtype': np.float16,
}]
for config in configs:
self._testBatchNormGradGrad(config)
if __name__ == '__main__':
test.main()
| apache-2.0 |
mihailignatenko/erp | addons/base_gengo/res_company.py | 321 | 1890 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
    # Extends res.company with per-company Gengo translation settings.
    _name = "res.company"
    _inherit = "res.company"

    # Gengo API credentials and behaviour flags; access is restricted via
    # the "groups" attribute on each field.
    _columns = {
        "gengo_private_key": fields.text("Gengo Private Key", copy=False, groups="base.group_system"),
        "gengo_public_key": fields.text("Gengo Public Key", copy=False, groups="base.group_user"),
        "gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo", groups="base.group_user"),
        "gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo.", groups="base.group_user"),
        "gengo_sandbox": fields.boolean("Sandbox Mode", help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose."),
    }
    _defaults = {
        # Auto-approval is on unless the company opts out.
        "gengo_auto_approve": True,
    }
| agpl-3.0 |
Serveurperso/SiK | Firmware/tools/reflector.py | 26 | 1218 | #!/usr/bin/env python
# reflect input bytes to output, printing as it goes

import serial, sys, optparse

parser = optparse.OptionParser("reflector")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
parser.add_option("--echo", action='store_true', default=False, help='echo to stdout')
parser.add_option("--rtscts", action='store_true', default=False, help='enable rtscts')
parser.add_option("--dsrdtr", action='store_true', default=False, help='enable dsrdtr')
parser.add_option("--xonxoff", action='store_true', default=False, help='enable xonxoff')

opts, args = parser.parse_args()

if len(args) != 1:
    print("usage: reflector.py <DEVICE>")
    sys.exit(1)

device = args[0]
port = serial.Serial(device, opts.baudrate, timeout=1,
                     dsrdtr=opts.dsrdtr, rtscts=opts.rtscts, xonxoff=opts.xonxoff)

while True:
    try:
        # Drain whatever is pending; with nothing pending, block (up to the
        # one-second timeout) on a single byte.
        pending = port.inWaiting() or 1
        data = port.read(pending)
        if not data:
            continue
        if opts.echo:
            sys.stdout.write(data)
            sys.stdout.flush()
        # Reflect the bytes straight back out the same port.
        port.write(data)
        port.flush()
    except KeyboardInterrupt:
        sys.exit(0)
| bsd-2-clause |
spapadim/OctoPrint | src/octoprint/plugins/softwareupdate/updaters/pip.py | 7 | 2496 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import pkg_resources
from octoprint.util.pip import PipCaller, UnknownPip
logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.pip")
console_logger = logging.getLogger("octoprint.plugins.softwareupdate.updaters.pip.console")
_pip_callers = dict()
_pip_version_dependency_links = pkg_resources.parse_version("1.5")
def can_perform_update(target, check):
    """Return True if *check* describes a pip-based update and pip is usable.

    Args:
        target: name of the update target (unused, kept for the common
            updater signature).
        check: the softwareupdate check configuration dict.
    """
    # dict.get() replaces the conditional-expression membership test.
    pip_caller = _get_pip_caller(command=check.get("pip_command"))
    return "pip" in check and pip_caller is not None and pip_caller.available
def _get_pip_caller(command=None):
    """Return a cached PipCaller for *command* (None = autodetected pip).

    Callers are created lazily and memoized in the module-level
    _pip_callers dict; the cached value is None when pip could not be
    located for that command.
    """
    key = command
    if command is None:
        key = "__default"

    # "key not in" is the idiomatic membership test (was "not key in").
    if key not in _pip_callers:
        try:
            _pip_callers[key] = PipCaller(configured=command)
            _pip_callers[key].on_log_call = _log_call
            _pip_callers[key].on_log_stdout = _log_stdout
            _pip_callers[key].on_log_stderr = _log_stderr
        except UnknownPip:
            # Remember the failure so we do not retry on every call.
            _pip_callers[key] = None

    return _pip_callers[key]
def perform_update(target, check, target_version):
    """Run pip (twice) to install *target_version* for *target*.

    The first run installs the package; the second forces a reinstall
    without dependencies to make sure the target itself is refreshed.
    Returns "ok" on success; raises RuntimeError when pip is unavailable.
    """
    pip_command = None
    if "pip_command" in check:
        pip_command = check["pip_command"]

    pip_caller = _get_pip_caller(command=pip_command)
    if pip_caller is None:
        raise RuntimeError("Can't run pip")

    install_arg = check["pip"].format(target_version=target_version)

    logger.debug(u"Target: %s, executing pip install %s" % (target, install_arg))
    # NOTE(review): target=target_version looks suspicious -- presumably the
    # "target" placeholder should receive the target, not the version; confirm.
    pip_args = ["install", check["pip"].format(target_version=target_version, target=target_version)]

    # NOTE(review): this compares the PipCaller object itself against a parsed
    # version; it likely should compare pip_caller's pip version attribute
    # against _pip_version_dependency_links -- confirm against upstream.
    if "dependency_links" in check and check["dependency_links"] and pip_caller >= _pip_version_dependency_links:
        pip_args += ["--process-dependency-links"]

    pip_caller.execute(*pip_args)

    # NOTE(review): the log message says --ignore-reinstalled but the flag
    # actually appended below is --ignore-installed.
    logger.debug(u"Target: %s, executing pip install %s --ignore-reinstalled --force-reinstall --no-deps" % (target, install_arg))
    pip_args += ["--ignore-installed", "--force-reinstall", "--no-deps"]
    pip_caller.execute(*pip_args)

    return "ok"
def _log_call(*lines):
    # Executed pip command lines get a blank prefix.
    _log(lines, prefix=u" ")


def _log_stdout(*lines):
    # pip's stdout is prefixed with ">".
    _log(lines, prefix=u">")


def _log_stderr(*lines):
    # pip's stderr is prefixed with "!".
    _log(lines, prefix=u"!")


def _log(lines, prefix=None):
    """Strip each line and forward it to the console logger with *prefix*."""
    for line in lines:
        console_logger.debug(u"{} {}".format(prefix, line.strip()))
| agpl-3.0 |
xuweiliang/Codelibrary | openstack_dashboard/dashboards/project/access_and_security/tabs.py | 9 | 5556 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from neutronclient.common import exceptions as neutron_exc
from openstack_dashboard.api import keystone
from openstack_dashboard.api import network
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.access_and_security.\
api_access.tables import EndpointsTable
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips.tables import FloatingIPsTable
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.tables import KeypairsTable
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups.tables import SecurityGroupsTable
class SecurityGroupsTab(tabs.TableTab):
    table_classes = (SecurityGroupsTable,)
    name = _("Security Groups")
    slug = "security_groups_tab"
    template_name = "horizon/common/_detail_table.html"
    permissions = ('openstack.services.compute',)

    def get_security_groups_data(self):
        """Fetch the project's security groups, sorted by name.

        Falls back to an empty list (with an error message where
        appropriate) when the network service cannot be reached.
        """
        groups = []
        try:
            groups = network.security_group_list(self.request)
        except neutron_exc.ConnectionFailed:
            exceptions.handle(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve security groups.'))
        return sorted(groups, key=lambda group: group.name)
class KeypairsTab(tabs.TableTab):
    table_classes = (KeypairsTable,)
    name = _("Key Pairs")
    slug = "keypairs_tab"
    template_name = "horizon/common/_detail_table.html"
    permissions = ('openstack.services.compute',)

    def get_keypairs_data(self):
        """Fetch the project's key pairs; empty list on error."""
        try:
            return nova.keypair_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve key pair list.'))
            return []
class FloatingIPsTab(tabs.TableTab):
    table_classes = (FloatingIPsTable,)
    name = _("Floating IPs")
    slug = "floating_ips_tab"
    template_name = "horizon/common/_detail_table.html"
    permissions = ('openstack.services.compute',)

    def get_floating_ips_data(self):
        """Fetch floating IPs and decorate each with pool and instance names.

        Every service call degrades to an empty list on failure so the tab
        still renders (with an error notification).
        """
        try:
            floating_ips = network.tenant_floating_ip_list(self.request)
        except neutron_exc.ConnectionFailed:
            floating_ips = []
            exceptions.handle(self.request)
        except Exception:
            floating_ips = []
            exceptions.handle(self.request,
                              _('Unable to retrieve floating IP addresses.'))

        try:
            floating_ip_pools = network.floating_ip_pools_list(self.request)
        except neutron_exc.ConnectionFailed:
            floating_ip_pools = []
            exceptions.handle(self.request)
        except Exception:
            floating_ip_pools = []
            exceptions.handle(self.request,
                              _('Unable to retrieve floating IP pools.'))
        pool_dict = dict([(obj.id, obj.name) for obj in floating_ip_pools])

        # Only look instances up when at least one IP is actually attached.
        attached_instance_ids = [ip.instance_id for ip in floating_ips
                                 if ip.instance_id is not None]
        if attached_instance_ids:
            instances = []
            try:
                # TODO(tsufiev): we should pass attached_instance_ids to
                # nova.server_list as soon as Nova API allows for this
                instances, has_more = nova.server_list(self.request)
            except Exception:
                exceptions.handle(self.request,
                                  _('Unable to retrieve instance list.'))

            instances_dict = dict([(obj.id, obj.name) for obj in instances])

            for ip in floating_ips:
                ip.instance_name = instances_dict.get(ip.instance_id)
                ip.pool_name = pool_dict.get(ip.pool, ip.pool)

        return floating_ips

    def allowed(self, request):
        # Hide the tab entirely when the deployment has no floating IPs.
        return network.floating_ip_supported(request)
class APIAccessTab(tabs.TableTab):
    table_classes = (EndpointsTable,)
    name = _("API Access")
    slug = "api_access_tab"
    template_name = "horizon/common/_detail_table.html"

    def get_endpoints_data(self):
        """Wrap each service-catalog entry in a keystone.Service.

        Each catalog dict is tagged with its positional index as 'id'
        (note: this mutates the entry in place, as before).
        """
        region = self.request.user.services_region
        endpoints = []
        for index, service in enumerate(self.request.user.service_catalog):
            service['id'] = index
            endpoints.append(keystone.Service(service, region))
        return endpoints
class AccessAndSecurityTabs(tabs.TabGroup):
    # Tab order here defines the on-screen order; "sticky" keeps the active
    # tab selected across page loads.
    slug = "access_security_tabs"
    tabs = (SecurityGroupsTab, KeypairsTab, FloatingIPsTab, APIAccessTab)
    sticky = True
| apache-2.0 |
AViisiion/namebench | nb_third_party/graphy/common.py | 205 | 15215 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code common to all chart types."""
import copy
import warnings
from graphy import formatters
from graphy import util
class Marker(object):
  """An abstract, position-less marker that can be attached to a DataSeries.

  Object attributes:
    shape: one of the shape codes below (Marker.arrow, Marker.diamond, etc.)
    color: color as a hex string, f.ex. '0000ff' for blue
    size: size of the marker
  """
  # TODO: Write an example using markers.

  # Shape codes:
  arrow = 'a'
  cross = 'c'
  diamond = 'd'
  circle = 'o'
  square = 's'
  x = 'x'

  # Note: The Google Chart API also knows some other markers ('v', 'V', 'r',
  # 'b') that I think would fit better into a grid API.
  # TODO: Make such a grid API

  def __init__(self, shape, color, size):
    """Store the shape code, color and size for this marker."""
    # TODO: Shapes 'r' and 'b' would be much easier to use if they had a
    # special-purpose API (instead of trying to fake it with markers)
    self.shape, self.color, self.size = shape, color, size
class _BasicStyle(object):
"""Basic style object. Used internally."""
def __init__(self, color):
self.color = color
class DataSeries(object):
  """Represents one data series for a chart (both data & presentation
  information).

  Object attributes:
    points: List of numbers representing y-values (x-values are not specified
            because the Google Chart API expects even x-value spacing).
    label: String with the series' label in the legend. The chart will only
           have a legend if at least one series has a label. If some series
           do not have a label then they will have an empty description in
           the legend. This is currently a limitation in the Google Chart
           API.
    style: A chart-type-specific style object. (LineStyle for LineChart,
           BarsStyle for BarChart, etc.)
    markers: List of (x, m) tuples where m is a Marker object and x is the
             x-axis value to place it at.

             The "fill" markers ('r' & 'b') are a little weird because they
             aren't a point on a line. For these, you can fake it by
             passing slightly weird data (I'd like a better API for them at
             some point):

             For 'b', you attach the marker to the starting series, and set x
             to the index of the ending line. Size is ignored, I think.

             For 'r', you can attach to any line, specify the starting
             y-value for x and the ending y-value for size. Y, in this case,
             is becase 0.0 (bottom) and 1.0 (top).
    color: DEPRECATED
  """

  # TODO: Should we require the points list to be non-empty ?
  # TODO: Do markers belong here? They are really only used for LineCharts
  def __init__(self, points, label=None, style=None, markers=None, color=None):
    """Construct a DataSeries. See class docstring for details on args."""
    # The heuristics below keep old call sites (color/style before label)
    # working while warning the caller; stacklevel=2 points the warning at
    # the caller, so do not add frames between here and the checks.
    if label is not None and util._IsColor(label):
      warnings.warn('Your code may be broken! Label is a hex triplet. Maybe '
                    'it is a color? The old argument order (color & style '
                    'before label) is deprecated.', DeprecationWarning,
                    stacklevel=2)
    if color is not None:
      warnings.warn('Passing color is deprecated. Pass a style object '
                    'instead.', DeprecationWarning, stacklevel=2)
      # Attempt to fix it for them. If they also passed a style, honor it.
      if style is None:
        style = _BasicStyle(color)
    # NOTE: basestring makes this module Python 2 only.
    if style is not None and isinstance(style, basestring):
      warnings.warn('Your code is broken! Style is a string, not an object. '
                    'Maybe you are passing a color? Passing color is '
                    'deprecated; pass a style object instead.',
                    DeprecationWarning, stacklevel=2)
    if style is None:
      style = _BasicStyle(None)
    self.data = points
    self.style = style
    self.markers = markers or []
    self.label = label

  def _GetColor(self):
    # Deprecated accessor kept for backwards compatibility.
    warnings.warn('DataSeries.color is deprecated, use '
                  'DataSeries.style.color instead.', DeprecationWarning,
                  stacklevel=2)
    return self.style.color

  def _SetColor(self, color):
    # Deprecated accessor kept for backwards compatibility.
    warnings.warn('DataSeries.color is deprecated, use '
                  'DataSeries.style.color instead.', DeprecationWarning,
                  stacklevel=2)
    self.style.color = color

  color = property(_GetColor, _SetColor)

  def _GetStyle(self):
    return self._style;

  def _SetStyle(self, style):
    # Old code passed LineStyle.solid (then a constant, now a factory
    # method); detect a callable and invoke it for compatibility.
    if style is not None and callable(style):
      warnings.warn('Your code may be broken ! LineStyle.solid and similar '
                    'are no longer constants, but class methods that '
                    'create LineStyle instances. Change your code to call '
                    'LineStyle.solid() instead of passing it as a value.',
                    DeprecationWarning, stacklevel=2)
      self._style = style()
    else:
      self._style = style

  style = property(_GetStyle, _SetStyle)
class AxisPosition(object):
  """Represents all the available axis positions.

  The available positions are as follows:
    AxisPosition.TOP
    AxisPosition.BOTTOM
    AxisPosition.LEFT
    AxisPosition.RIGHT
  """
  # Values are the single-character axis codes used by the Google Chart API.
  TOP = 't'
  BOTTOM = 'x'
  LEFT = 'y'
  RIGHT = 'r'
class Axis(object):
  """Represents one axis.

  Object settings:
    min: Minimum value for the bottom or left end of the axis
    max: Maximum value for the top or right end of the axis.
    labels: List of labels to show along the axis.
    label_positions: List of positions to show the labels at.  Uses the scale
                     set by min & max, so if you set min = 0 and max = 10, then
                     label positions [0, 5, 10] would be at the bottom,
                     middle, and top of the axis, respectively.
    grid_spacing: Amount of space between gridlines (in min/max scale).
                  A value of 0 disables gridlines.
    label_gridlines: If True, draw a line extending from each label
                     on the axis all the way across the chart.
  """

  def __init__(self, axis_min=None, axis_max=None):
    """Construct a new Axis.

    Args:
      axis_min: smallest value on the axis (None = auto)
      axis_max: largest value on the axis (None = auto)
    """
    self.min = axis_min
    self.max = axis_max
    self.labels = []            # No explicit labels by default.
    self.label_positions = []   # Positions matching self.labels.
    self.grid_spacing = 0       # 0 disables gridlines.
    self.label_gridlines = False
# TODO: Add other chart types. Order of preference:
# - scatter plots
# - us/world maps
class BaseChart(object):
  """Base chart object with standard behavior for all other charts.

  Object attributes:
    data: List of DataSeries objects.  Chart subtypes provide convenience
          functions (like AddLine, AddBars, AddSegment) to add more series
          later.
    left/right/bottom/top: Axis objects for the 4 different axes.
    formatters: A list of callables which will be used to format this chart
                for display.  TODO: Need better documentation for how these
                work.
    auto_scale, auto_color, auto_legend:
      These aliases let users access the default formatters without poking
      around in self.formatters.  If the user removes them from
      self.formatters then they will no longer be enabled, even though
      they'll still be accessible through the aliases.  Similarly,
      re-assigning the aliases has no effect on the contents of
      self.formatters.
    display: This variable is reserved for backends to populate with a
             display object.  The intention is that the display object would
             be used to render this chart.  The details of what gets put here
             depends on the specific backend you are using.
  """
  # Canonical ordering of position keys
  _POSITION_CODES = 'yrxt'

  # TODO: Add more inline args to __init__ (esp. labels).
  # TODO: Support multiple series in the constructor, if given.
  def __init__(self):
    """Construct a BaseChart object."""
    self.data = []

    # Map of position code -> list of Axis objects for that position.
    self._axes = {}
    for code in self._POSITION_CODES:
      self._axes[code] = [Axis()]
    self._legend_labels = []  # AutoLegend fills this out
    self._show_legend = False  # AutoLegend fills this out

    # Aliases for default formatters
    self.auto_color = formatters.AutoColor()
    self.auto_scale = formatters.AutoScale()
    self.auto_legend = formatters.AutoLegend
    self.formatters = [self.auto_color, self.auto_scale, self.auto_legend]
    # display is used to convert the chart into something displayable (like a
    # url or img tag).
    self.display = None

  def AddFormatter(self, formatter):
    """Add a new formatter to the chart (convenience method)."""
    self.formatters.append(formatter)

  def AddSeries(self, points, color=None, style=None, markers=None,
                label=None):
    """DEPRECATED

    Add a new series of data to the chart; return the DataSeries object.
    """
    warnings.warn('AddSeries is deprecated. Instead, call AddLine for '
                  'LineCharts, AddBars for BarCharts, AddSegment for '
                  'PieCharts ', DeprecationWarning, stacklevel=2)
    series = DataSeries(points, color=color, style=style, markers=markers,
                        label=label)
    self.data.append(series)
    return series

  def GetDependentAxes(self):
    """Return any dependent axes ('left' and 'right' by default for
    LineCharts, although bar charts would use 'bottom' and 'top').
    """
    return self._axes[AxisPosition.LEFT] + self._axes[AxisPosition.RIGHT]

  def GetIndependentAxes(self):
    """Return any independent axes (normally top & bottom, although
    horizontal bar charts use left & right by default).
    """
    return self._axes[AxisPosition.TOP] + self._axes[AxisPosition.BOTTOM]

  def GetDependentAxis(self):
    """Return this chart's main dependent axis (often 'left', but
    horizontal bar-charts use 'bottom').
    """
    return self.left

  def GetIndependentAxis(self):
    """Return this chart's main independent axis (often 'bottom', but
    horizontal bar-charts use 'left').
    """
    return self.bottom

  def _Clone(self):
    """Make a deep copy of this chart.

    Formatters & display will be missing from the copy, due to limitations in
    deepcopy.
    """
    orig_values = {}
    # Things which deepcopy will likely choke on if it tries to copy.
    uncopyables = ['formatters', 'display', 'auto_color', 'auto_scale',
                   'auto_legend']
    # Temporarily detach the uncopyable attributes, deep-copy, then restore
    # them on the original (the clone keeps None for these).
    for name in uncopyables:
      orig_values[name] = getattr(self, name)
      setattr(self, name, None)
    clone = copy.deepcopy(self)
    for name, orig_value in orig_values.iteritems():
      setattr(self, name, orig_value)
    return clone

  def GetFormattedChart(self):
    """Get a copy of the chart with formatting applied."""
    # Formatters need to mutate the chart, but we don't want to change it out
    # from under the user. So, we work on a copy of the chart.
    scratchpad = self._Clone()
    for formatter in self.formatters:
      formatter(scratchpad)
    return scratchpad

  def GetMinMaxValues(self):
    """Get the largest & smallest values in this chart, returned as
    (min_value, max_value).  Takes into account complications like stacked
    data series.

    For example, with non-stacked series, a chart with [1, 2, 3] and
    [4, 5, 6] would return (1, 6).  If the same chart was stacking the data
    series, it would return (5, 9).
    """
    # None entries in a series are skipped (they represent missing points).
    MinPoint = lambda data: min(x for x in data if x is not None)
    MaxPoint = lambda data: max(x for x in data if x is not None)
    mins = [MinPoint(series.data) for series in self.data if series.data]
    maxes = [MaxPoint(series.data) for series in self.data if series.data]
    if not mins or not maxes:
      return None, None # No data, just bail.
    return min(mins), max(maxes)

  def AddAxis(self, position, axis):
    """Add an axis to this chart in the given position.

    Args:
      position: an AxisPosition object specifying the axis's position
      axis: The axis to add, an Axis object
    Returns:
      the value of the axis parameter
    """
    self._axes.setdefault(position, []).append(axis)
    return axis

  def GetAxis(self, position):
    """Get or create the first available axis in the given position.

    This is a helper method for the left, right, top, and bottom properties.
    If the specified axis does not exist, it will be created.

    Args:
      position: the position to search for
    Returns:
      The first axis in the given position
    """
    # Not using setdefault here just in case, to avoid calling the Axis()
    # constructor needlessly
    if position in self._axes:
      return self._axes[position][0]
    else:
      axis = Axis()
      self._axes[position] = [axis]
      return axis

  def SetAxis(self, position, axis):
    """Set the first axis in the given position to the given value.

    This is a helper method for the left, right, top, and bottom properties.

    Args:
      position: an AxisPosition object specifying the axis's position
      axis: The axis to set, an Axis object
    Returns:
      the value of the axis parameter
    """
    self._axes.setdefault(position, [None])[0] = axis
    return axis

  def _GetAxes(self):
    """Return a generator of (position_code, Axis) tuples for this chart's
    axes.  The axes will be sorted by position using the canonical ordering
    sequence, _POSITION_CODES.
    """
    for code in self._POSITION_CODES:
      for axis in self._axes.get(code, []):
        yield (code, axis)

  def _GetBottom(self):
    return self.GetAxis(AxisPosition.BOTTOM)

  def _SetBottom(self, value):
    self.SetAxis(AxisPosition.BOTTOM, value)

  bottom = property(_GetBottom, _SetBottom,
                    doc="""Get or set the bottom axis""")

  def _GetLeft(self):
    return self.GetAxis(AxisPosition.LEFT)

  def _SetLeft(self, value):
    self.SetAxis(AxisPosition.LEFT, value)

  left = property(_GetLeft, _SetLeft,
                  doc="""Get or set the left axis""")

  def _GetRight(self):
    return self.GetAxis(AxisPosition.RIGHT)

  def _SetRight(self, value):
    self.SetAxis(AxisPosition.RIGHT, value)

  right = property(_GetRight, _SetRight,
                   doc="""Get or set the right axis""")

  def _GetTop(self):
    return self.GetAxis(AxisPosition.TOP)

  def _SetTop(self, value):
    self.SetAxis(AxisPosition.TOP, value)

  top = property(_GetTop, _SetTop,
                 doc="""Get or set the top axis""")
| apache-2.0 |
tungvx/deploy | .google_appengine/google/appengine/_internal/django/core/management/commands/syncdb.py | 23 | 8047 | from optparse import make_option
import sys
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.core.management.base import NoArgsCommand
from google.appengine._internal.django.core.management.color import no_style
from google.appengine._internal.django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from google.appengine._internal.django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from google.appengine._internal.django.utils.datastructures import SortedDict
from google.appengine._internal.django.utils.importlib import import_module
class Command(NoArgsCommand):
    # Extra command-line options on top of the base NoArgsCommand set.
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
                'Defaults to the "default" database.'),
    )
    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."

    def handle_noargs(self, **options):
        """Create missing tables, run custom SQL and indexes for newly created
        models, then load initial_data fixtures (unless disabled)."""
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback', False)

        # Stealth option -- 'load_initial_data' is used by the testing setup
        # process to disable initial fixture loading.
        load_initial_data = options.get('load_initial_data', True)

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError, exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise

        db = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized.
        # Each entry is (app_label, [models the router allows on this db]).
        all_models = [
            (app.__name__.split('.')[-2],
                [m for m in models.get_models(app, include_auto_created=True)
                if router.allow_syncdb(db, m)])
            for app in models.get_apps()
        ]

        def model_installed(model):
            # True if neither the model's table nor its auto-created (m2m)
            # table already exists in the database.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))

        manifest = SortedDict(
            (app_name, filter(model_installed, model_list))
            for app_name, model_list in all_models
        )

        # Create the tables for each model
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 2:
                    print "Processing %s.%s model" % (app_name, model._meta.object_name)
                sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                # Defer foreign-key DDL until the referenced table is known.
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
                sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    print "Creating table %s" % model._meta.db_table
                for statement in sql:
                    cursor.execute(statement)
                tables.append(connection.introspection.table_name_converter(model._meta.db_table))

        transaction.commit_unless_managed(using=db)

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 1:
                            print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
                            if show_traceback:
                                import traceback
                                traceback.print_exc()
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 2:
                            print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)

        # Install SQL indicies for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 1:
                            print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            sys.stderr.write("Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            from google.appengine._internal.django.core.management import call_command
            call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
| apache-2.0 |
kidmaple/CoolWall | user/python/Lib/dos-8x3/test_win.py | 11 | 5449 | # Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
from _winreg import *
import os, sys
# Throwaway key used for all tests; deleted again by DeleteTestData.
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"

# (value_name, value_data, registry_type) triples covering each supported
# registry value type, including unicode names/values and mixed lists.
test_data = [
    ("Int Value", 45, REG_DWORD),
    ("String Val", "A string value", REG_SZ,),
    (u"Unicode Val", u"A Unicode value", REG_SZ,),
    ("StringExpand", "The path is %path%", REG_EXPAND_SZ),
    ("UnicodeExpand", u"The path is %path%", REG_EXPAND_SZ),
    ("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
    ("Multi-unicode", [u"Lots", u"of", u"unicode", u"values"], REG_MULTI_SZ),
    ("Multi-mixed", [u"Unicode", u"and", "string", "values"],REG_MULTI_SZ),
    ("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
]
def WriteTestData(root_key):
    """Create the test key/sub-key under root_key, write all test_data
    values, and verify that closed key handles become unusable."""
    # Set the default value for this key.
    SetValue(root_key, test_key_name, REG_SZ, "Default value")
    key = CreateKey(root_key, test_key_name)
    # Create a sub-key
    sub_key = CreateKey(key, "sub_key")
    # Give the sub-key some named values
    for value_name, value_data, value_type in test_data:
        SetValueEx(sub_key, value_name, 0, value_type, value_data)
    # Check we wrote as many items as we thought.
    nkeys, nvalues, since_mod = QueryInfoKey(key)
    assert nkeys==1, "Not the correct number of sub keys"
    assert nvalues==1, "Not the correct number of values"
    nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
    assert nkeys==0, "Not the correct number of sub keys"
    assert nvalues==len(test_data), "Not the correct number of values"
    # Close this key this way...
    # (but before we do, copy the key as an integer - this allows
    # us to test that the key really gets closed).
    int_sub_key = int(sub_key)
    CloseKey(sub_key)
    try:
        QueryInfoKey(int_sub_key)
        raise RuntimeError, "It appears the CloseKey() function does not close the actual key!"
    except EnvironmentError:
        pass
    # ... and close that key that way :-)
    int_key = int(key)
    key.Close()
    try:
        QueryInfoKey(int_key)
        raise RuntimeError, "It appears the key.Close() function does not close the actual key!"
    except EnvironmentError:
        pass
def ReadTestData(root_key):
    """Read back everything WriteTestData wrote and verify it, via both
    enumeration (EnumValue/EnumKey) and direct lookup (QueryValueEx)."""
    # Check we can get default value for this key.
    val = QueryValue(root_key, test_key_name)
    assert val=="Default value", "Registry didn't give back the correct value"

    key = OpenKey(root_key, test_key_name)
    # Read the sub-keys
    sub_key = OpenKey(key, "sub_key")
    # Check I can enumerate over the values.
    index = 0
    while 1:
        try:
            data = EnumValue(sub_key, index)
        except EnvironmentError:
            # EnumValue raises once the index is past the last value.
            break
        assert data in test_data, "Didn't read back the correct test data"
        index = index + 1
    assert index==len(test_data), "Didn't read the correct number of items"
    # Check I can directly access each item
    for value_name, value_data, value_type in test_data:
        read_val, read_typ = QueryValueEx(sub_key, value_name)
        assert read_val==value_data and read_typ == value_type, \
               "Could not directly read the value"
    sub_key.Close()
    # Enumerate our main key.
    read_val = EnumKey(key, 0)
    assert read_val == "sub_key", "Read subkey value wrong"
    try:
        EnumKey(key, 1)
        assert 0, "Was able to get a second key when I only have one!"
    except EnvironmentError:
        pass
    key.Close()
def DeleteTestData(root_key):
    """Delete everything created by WriteTestData and verify that repeated
    deletion and opening a deleted key both fail."""
    key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
    sub_key = OpenKey(key, "sub_key", 0, KEY_ALL_ACCESS)
    # It is not necessary to delete the values before deleting
    # the key (although subkeys must not exist).  We delete them
    # manually just to prove we can :-)
    for value_name, value_data, value_type in test_data:
        DeleteValue(sub_key, value_name)

    nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
    assert nkeys==0 and nvalues==0, "subkey not empty before delete"
    sub_key.Close()
    DeleteKey(key, "sub_key")

    try:
        # Shouldnt be able to delete it twice!
        DeleteKey(key, "sub_key")
        assert 0, "Deleting the key twice succeeded"
    except EnvironmentError:
        pass
    key.Close()
    DeleteKey(root_key, test_key_name)
    # Opening should now fail!
    try:
        key = OpenKey(root_key, test_key_name)
        assert 0, "Could open the non-existent key"
    except WindowsError: # Use this error name this time
        pass
def TestAll(root_key):
    """Run the full write/read/delete cycle against the given registry root."""
    WriteTestData(root_key)
    ReadTestData(root_key)
    DeleteTestData(root_key)

# Test on my local machine.
TestAll(HKEY_CURRENT_USER)
print "Local registry tests worked"

# Optionally repeat the tests against a remote machine's registry, named
# via a "--remote \\machine" command-line argument.
try:
    remote_name = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
    remote_name = None

if remote_name is not None:
    try:
        remote_key = ConnectRegistry(remote_name, HKEY_CURRENT_USER)
    except EnvironmentError, exc:
        print "Could not connect to the remote machine -", exc.strerror
        remote_key = None
    if remote_key is not None:
        TestAll(remote_key)
        print "Remote registry tests worked"
else:
    print "Remote registry calls can be tested using",
    print "'test_winreg.py --remote \\\\machine_name'"
| gpl-2.0 |
scenarios/tensorflow | tensorflow/contrib/tensor_forest/python/kernel_tests/count_extremely_random_stats_op_test.py | 22 | 14818 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.count_extremely_random_stats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
# NOTE(review): presumably RTLD_GLOBAL is needed so symbols from one shared
# library are visible to others loaded later -- confirm against issue #6568.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
  import ctypes
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class CountExtremelyRandomStatsClassificationTest(test_util.TensorFlowTestCase):
  """Tests count_extremely_random_stats in classification mode
  (regression=False): per-class count accumulation per node/split."""

  def setUp(self):
    # Two-node split tree: node 0 routes rows to node 1 (x0 < 0) or 2.
    self.input_data = [[-1., 0.], [-1., 2.],  # node 1
                       [1., 0.], [1., -2.]]  # node 2
    self.input_labels = [0, 1, 2, 3]
    self.tree = [[1, 0], [-1, 0], [-1, 0]]
    self.tree_thresholds = [0., 0., 0.]
    self.node_map = [-1, 0, -1]  # Only node 1 has a candidate-split accumulator.
    self.split_features = [[1], [-1]]
    self.split_thresholds = [[1.], [0.]]
    self.epochs = [0, 1, 1]
    self.current_epoch = [1]

    # Data spec: two scalar float dense features.
    spec_proto = data_ops.TensorForestDataSpec()
    f1 = spec_proto.dense.add()
    f1.name = 'f1'
    f1.original_type = data_ops.DATA_FLOAT
    f1.size = 1

    f2 = spec_proto.dense.add()
    f2.name = 'f2'
    f2.original_type = data_ops.DATA_FLOAT
    f2.size = 1
    spec_proto.dense_features_size = 2
    self.data_spec = spec_proto.SerializeToString()

  def testSimple(self):
    # Unweighted dense input: check node sums, split/total stats, and leaves.
    with self.test_session():
      (pcw_node_sums, _, pcw_splits_indices, pcw_splits_sums, _,
       pcw_totals_indices, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels, [],
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual(
          [[4., 1., 1., 1., 1.], [2., 1., 1., 0., 0.], [2., 0., 0., 1., 1.]],
          pcw_node_sums.eval())
      self.assertAllEqual([[0, 0, 0], [0, 0, 1]], pcw_splits_indices.eval())
      self.assertAllEqual([1., 1.], pcw_splits_sums.eval())
      self.assertAllEqual([[0, 2], [0, 0], [0, 1]], pcw_totals_indices.eval())
      self.assertAllEqual([1., 2., 1.], pcw_totals_sums.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testSimpleWeighted(self):
    # Per-example weights scale the accumulated counts.
    with self.test_session():
      input_weights = [1.5, 2.0, 3.0, 4.0]
      (pcw_node_sums, _, pcw_splits_indices, pcw_splits_sums, _,
       pcw_totals_indices, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels,
           input_weights,
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual([[10.5, 1.5, 2., 3., 4.], [3.5, 1.5, 2., 0., 0.],
                           [7., 0., 0., 3., 4.]], pcw_node_sums.eval())
      self.assertAllEqual([[0, 0, 0], [0, 0, 1]], pcw_splits_indices.eval())
      self.assertAllEqual([1.5, 1.5], pcw_splits_sums.eval())
      self.assertAllEqual([[0, 2], [0, 0], [0, 1]], pcw_totals_indices.eval())
      self.assertAllEqual([2., 3.5, 1.5], pcw_totals_sums.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testMissingLabel(self):
    # A label of -1 is skipped: the example still reaches a leaf but adds no
    # class counts.
    labels = [0, 1, -1, 3]
    with self.test_session():
      (pcw_node_sums, _, pcw_splits_indices, pcw_splits_sums, _,
       pcw_totals_indices, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           labels, [],
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual(
          [[3., 1., 1., 0., 1.], [2., 1., 1., 0., 0.], [1., 0., 0., 0., 1.]],
          pcw_node_sums.eval())
      self.assertAllEqual([[0, 0, 0], [0, 0, 1]], pcw_splits_indices.eval())
      self.assertAllEqual([1., 1.], pcw_splits_sums.eval())
      self.assertAllEqual([[0, 2], [0, 0], [0, 1]], pcw_totals_indices.eval())
      self.assertAllEqual([1., 2., 1.], pcw_totals_sums.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testSparseInput(self):
    # Same op driven by a sparse feature matrix instead of dense input.
    sparse_shape = [4, 10]
    sparse_indices = [[0, 0], [0, 4], [0, 9], [1, 1], [1, 7], [2, 0], [3, 0],
                      [3, 4]]
    sparse_values = [3.0, -1.0, 0.5, -1.5, 6.0, -2.0, -0.5, 2.0]

    spec_proto = data_ops.TensorForestDataSpec()
    f1 = spec_proto.sparse.add()
    f1.name = 'f1'
    f1.original_type = data_ops.DATA_FLOAT
    f1.size = -1

    spec_proto.dense_features_size = 0
    data_spec = spec_proto.SerializeToString()

    with self.test_session():
      (pcw_node_sums, _, pcw_splits_indices, pcw_splits_sums, _,
       pcw_totals_indices, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           [],
           sparse_indices,
           sparse_values,
           sparse_shape,
           self.input_labels, [],
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual([[4., 1., 1., 1., 1.],
                           [2., 0., 0., 1., 1.],
                           [2., 1., 1., 0., 0.]],
                          pcw_node_sums.eval())
      self.assertAllEqual([[0, 0, 4],
                           [0, 0, 0],
                           [0, 0, 3]],
                          pcw_splits_indices.eval())
      self.assertAllEqual([1., 2., 1.], pcw_splits_sums.eval())
      self.assertAllEqual([[0, 4], [0, 0], [0, 3]], pcw_totals_indices.eval())
      self.assertAllEqual([1., 2., 1.], pcw_totals_sums.eval())
      self.assertAllEqual([2, 2, 1, 1], leaves.eval())

  def testFutureEpoch(self):
    # Nodes born after the current epoch accumulate no statistics.
    current_epoch = [3]
    with self.test_session():
      (pcw_node_sums, _, _, pcw_splits_sums, _, _, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels, [],
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           current_epoch,
           input_spec=self.data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual(
          [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]],
          pcw_node_sums.eval())
      self.assertAllEqual([], pcw_splits_sums.eval())
      self.assertAllEqual([], pcw_totals_sums.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testThreaded(self):
    # Results must be identical with intra-op parallelism enabled.
    with self.test_session(
        config=config_pb2.ConfigProto(intra_op_parallelism_threads=2)):
      (pcw_node_sums, _, pcw_splits_indices, pcw_splits_sums, _,
       pcw_totals_indices, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels, [],
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual([[4., 1., 1., 1., 1.], [2., 1., 1., 0., 0.],
                           [2., 0., 0., 1., 1.]], pcw_node_sums.eval())
      self.assertAllEqual([[0, 0, 0], [0, 0, 1]], pcw_splits_indices.eval())
      self.assertAllEqual([1., 1.], pcw_splits_sums.eval())
      self.assertAllEqual([[0, 2], [0, 0], [0, 1]], pcw_totals_indices.eval())
      self.assertAllEqual([1., 2., 1.], pcw_totals_sums.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testNoAccumulators(self):
    # With no node mapped to an accumulator, split/total stats stay empty.
    with self.test_session():
      (pcw_node_sums, _, pcw_splits_indices, pcw_splits_sums, _,
       pcw_totals_indices, pcw_totals_sums, _,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels, [],
           self.tree,
           self.tree_thresholds, [-1] * 3,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=5,
           regression=False))

      self.assertAllEqual([[4., 1., 1., 1., 1.], [2., 1., 1., 0., 0.],
                           [2., 0., 0., 1., 1.]], pcw_node_sums.eval())
      self.assertEquals((0, 3), pcw_splits_indices.eval().shape)
      self.assertAllEqual([], pcw_splits_sums.eval())
      self.assertEquals((0, 2), pcw_totals_indices.eval().shape)
      self.assertAllEqual([], pcw_totals_sums.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testBadInput(self):
    # Mismatched per-node array lengths must raise an op error.
    del self.node_map[-1]
    with self.test_session():
      with self.assertRaisesOpError(
          'Number of nodes should be the same in '
          'tree, tree_thresholds, node_to_accumulator, and birth_epoch.'):
        pcw_node, _, _, _, _, _, _, _, _ = (
            tensor_forest_ops.count_extremely_random_stats(
                self.input_data, [], [], [],
                self.input_labels, [],
                self.tree,
                self.tree_thresholds,
                self.node_map,
                self.split_features,
                self.split_thresholds,
                self.epochs,
                self.current_epoch,
                input_spec=self.data_spec,
                num_classes=5,
                regression=False))

        self.assertAllEqual([], pcw_node.eval())
class CountExtremelyRandomStatsRegressionTest(test_util.TensorFlowTestCase):
  """Tests count_extremely_random_stats in regression mode
  (regression=True): sums and sums-of-squares instead of class counts."""

  def setUp(self):
    # Same two-node tree as the classification tests, but real-valued labels.
    self.input_data = [[-1., 0.], [-1., 2.],  # node 1
                       [1., 0.], [1., -2.]]  # node 2
    self.input_labels = [[3.], [6.], [2.], [3.]]
    self.tree = [[1, 0], [-1, 0], [-1, 0]]
    self.tree_thresholds = [0., 0., 0.]
    self.node_map = [-1, 0, -1]
    self.split_features = [[1], [-1]]
    self.split_thresholds = [[1.], [0.]]
    self.epochs = [0, 1, 1]
    self.current_epoch = [1]

    # Data spec: two scalar float dense features.
    spec_proto = data_ops.TensorForestDataSpec()
    f1 = spec_proto.dense.add()
    f1.name = 'f1'
    f1.original_type = data_ops.DATA_FLOAT
    f1.size = 1

    f2 = spec_proto.dense.add()
    f2.name = 'f2'
    f2.original_type = data_ops.DATA_FLOAT
    f2.size = 1
    spec_proto.dense_features_size = 2
    self.data_spec = spec_proto.SerializeToString()

  def testSimple(self):
    # Unweighted regression: check sums and squared sums per node/split.
    with self.test_session():
      (pcw_node_sums, pcw_node_squares, pcw_splits_indices, pcw_splits_sums,
       pcw_splits_squares, pcw_totals_indices, pcw_totals_sums,
       pcw_totals_squares,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels, [],
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=2,
           regression=True))

      self.assertAllEqual([[4., 14.], [2., 9.], [2., 5.]], pcw_node_sums.eval())
      self.assertAllEqual([[4., 58.], [2., 45.], [2., 13.]],
                          pcw_node_squares.eval())
      self.assertAllEqual([[0, 0]], pcw_splits_indices.eval())
      self.assertAllEqual([[1., 3.]], pcw_splits_sums.eval())
      self.assertAllEqual([[1., 9.]], pcw_splits_squares.eval())
      self.assertAllEqual([[0]], pcw_totals_indices.eval())
      self.assertAllEqual([[2., 9.]], pcw_totals_sums.eval())
      self.assertAllEqual([[2., 45.]], pcw_totals_squares.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())

  def testSimpleWeighted(self):
    # Weighted regression: weights scale both sums and squared sums.
    with self.test_session():
      input_weights = [1.0, 2.0, 3.0, 4.0]
      (pcw_node_sums, pcw_node_squares, pcw_splits_indices, pcw_splits_sums,
       pcw_splits_squares, pcw_totals_indices, pcw_totals_sums,
       pcw_totals_squares,
       leaves) = (tensor_forest_ops.count_extremely_random_stats(
           self.input_data, [], [], [],
           self.input_labels,
           input_weights,
           self.tree,
           self.tree_thresholds,
           self.node_map,
           self.split_features,
           self.split_thresholds,
           self.epochs,
           self.current_epoch,
           input_spec=self.data_spec,
           num_classes=2,
           regression=True))

      self.assertAllEqual([[10., 33.], [3., 15.], [7., 18.]],
                          pcw_node_sums.eval())
      self.assertAllEqual([[10., 129.], [3., 81.], [7., 48.]],
                          pcw_node_squares.eval())
      self.assertAllEqual([[0, 0]], pcw_splits_indices.eval())
      self.assertAllEqual([[1., 3.]], pcw_splits_sums.eval())
      self.assertAllEqual([[1., 9.]], pcw_splits_squares.eval())
      self.assertAllEqual([[0]], pcw_totals_indices.eval())
      self.assertAllEqual([[2., 9.]], pcw_totals_sums.eval())
      self.assertAllEqual([[2., 45.]], pcw_totals_squares.eval())
      self.assertAllEqual([1, 1, 2, 2], leaves.eval())
# Allow running this test file directly as a script.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
NcLang/vimrc | sources_non_forked/YouCompleteMe/third_party/ycmd/third_party/python-future/src/past/types/olddict.py | 62 | 2735 | """
A dict subclass for Python 3 that behaves like Python 2's dict
Example use:
>>> from past.builtins import dict
>>> d1 = dict() # instead of {} for an empty dict
>>> d2 = dict(key1='value1', key2='value2')
The keys, values and items methods now return lists on Python 3.x and there are
methods for iterkeys, itervalues, iteritems, and viewkeys etc.
>>> for d in (d1, d2):
... assert isinstance(d.keys(), list)
... assert isinstance(d.values(), list)
... assert isinstance(d.items(), list)
"""
import sys
from past.utils import with_metaclass
_builtin_dict = dict  # keep a handle on the builtin before `olddict` is defined
ver = sys.version_info[:2]  # NOTE(review): appears unused in this module — confirm
class BaseOldDict(type):
    """Metaclass for olddict.

    Overrides ``isinstance`` so that any builtin ``dict`` (including plain
    ``{}`` literals) is considered an instance of ``olddict``.
    """

    def __instancecheck__(cls, instance):
        # Delegate the check to the builtin dict type itself.
        return isinstance(instance, _builtin_dict)
class olddict(with_metaclass(BaseOldDict, _builtin_dict)):
    """
    A backport of the Python 2 dict object to Py3.

    ``keys``/``values``/``items`` return lists (Py2 semantics), while the
    ``iter*``/``view*`` aliases expose the lazy builtin methods.
    """
    # On Py3 the builtin methods already return (iterable) views, so they
    # serve as both the iter* and view* variants.
    iterkeys = _builtin_dict.keys
    viewkeys = _builtin_dict.keys

    def keys(self):
        """D.keys() -> list of D's keys (Py2 semantics)."""
        return list(super(olddict, self).keys())

    itervalues = _builtin_dict.values
    viewvalues = _builtin_dict.values

    def values(self):
        """D.values() -> list of D's values (Py2 semantics)."""
        return list(super(olddict, self).values())

    iteritems = _builtin_dict.items
    viewitems = _builtin_dict.items

    def items(self):
        """D.items() -> list of D's (key, value) pairs (Py2 semantics)."""
        return list(super(olddict, self).items())

    def has_key(self, k):
        """
        D.has_key(k) -> True if D has a key k, else False
        """
        return k in self

    def __native__(self):
        """
        Hook for the past.utils.native() function.

        Returns the equivalent builtin ``dict``.
        """
        # Bug fix: this previously did `return super(oldbytes, self)`, where
        # `oldbytes` is undefined in this module (a copy/paste from the
        # newbytes type), so calling __native__ raised NameError.  Returning
        # a plain builtin dict matches the purpose of native().
        return _builtin_dict(self)
__all__ = ['olddict']
| mit |
nielsvanoch/django | django/conf/locale/de/formats.py | 115 | 1100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Output formats (Django date-format syntax, see comment above).
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'  # e.g. '25.10.2006'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
)
# Number formatting: German uses ',' as decimal mark and '.' for thousands,
# grouped in blocks of 3 digits (e.g. 1.234.567,89).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
Omegaphora/external_chromium_org | build/escape_unicode.py | 155 | 1482 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert any unicode characters found in the input file to C literals."""
import codecs
import optparse
import os
import sys
def main(argv):
  """Command-line entry point.

  Expects argv in sys.argv form: [prog, '-o', <output_dir>, <input_file>],
  where <input_file> must end in '.utf8'.  Returns 0 on success, 1 on a
  usage error (after printing a short message).
  """
  parser = optparse.OptionParser()
  usage = 'Usage: %prog -o <output_dir> <input_file>'
  parser.set_usage(usage)
  parser.add_option('-o', dest='output_dir')

  options, arglist = parser.parse_args(argv)

  if not options.output_dir:
    # Bug fix: Py2-only `print` statements were a SyntaxError on Python 3;
    # parenthesized single-argument print calls behave identically on both.
    print("output_dir required")
    return 1

  if len(arglist) != 2:
    print("input_file required")
    return 1

  in_filename = arglist[1]
  if not in_filename.endswith('.utf8'):
    print("input_file should end in .utf8")
    return 1

  # Output name: same basename with the .utf8 extension stripped.
  out_filename = os.path.join(options.output_dir, os.path.basename(
      os.path.splitext(in_filename)[0]))

  WriteEscapedFile(in_filename, out_filename)
  return 0
def WriteEscapedFile(in_filename, out_filename):
  """Copies UTF-8 text to an ASCII file, C-escaping non-ASCII characters.

  Each non-ASCII character is replaced by the '\\xHH' escapes of its UTF-8
  bytes.  If the following source character is a hex digit, an empty C string
  literal ('""') is inserted so the escape is not absorbed into the next
  character when the output is pasted into C source.
  """
  # Bug fix: the input file was previously opened without ever being closed;
  # use context managers so both handles are released even on error.
  with codecs.open(in_filename, 'r', 'utf8') as in_file:
    input_data = in_file.read()
  hex_digits = '0123456789abcdefABCDEF'
  with codecs.open(out_filename, 'w', 'ascii') as out_file:
    for i, char in enumerate(input_data):
      if ord(char) > 127:
        # Emit '\xHH' for every UTF-8 byte of this character.  All bytes of a
        # non-ASCII UTF-8 sequence are >= 0x80, so this matches the escapes
        # the original Py2 repr() trick produced, but works on Py2 and Py3
        # alike (bytearray() yields ints on both).
        out_file.write(''.join(
            '\\x%02x' % byte for byte in bytearray(char.encode('utf8'))))
        # NOTE: substring membership is kept deliberately — at end of input
        # the slice is '' and '' in hex_digits is True, so a trailing escape
        # gets a harmless '""' appended, exactly as before.
        if input_data[i + 1:i + 2] in hex_digits:
          out_file.write('""')
      else:
        out_file.write(char)
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main(sys.argv))
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.