| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""
If you know what an abstract syntax tree (AST) is, you'll see that this module
is pretty much that. The classes represent syntax elements like functions and
imports.
This is the "business logic" part of the parser. There's a lot of logic here
that makes it easier for Jedi (and other libraries to deal with a Python syntax
tree.
By using `get_code` on a module, you can get back the 1-to-1 representation of
the input given to the parser. This is important if you are using refactoring.
The easiest way to play with this module is to use :class:`parsing.Parser`.
:attr:`parsing.Parser.module` holds an instance of :class:`Module`:
>>> from jedi._compatibility import u
>>> from jedi.parser import ParserWithRecovery, load_grammar
>>> parser = ParserWithRecovery(load_grammar(), u('import os'), 'example.py')
>>> submodule = parser.module
>>> submodule
<Module: example.py@1-1>
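As an illustrative check (using the module parsed above), ``get_code`` gives
back the source unchanged:
>>> submodule.get_code()
'import os'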
Any subclass of :class:`Scope`, including :class:`Module`, has an attribute
:attr:`imports <Scope.imports>`:
>>> submodule.imports
[<ImportName: import os@1,0>]
See also :attr:`Scope.subscopes` and :attr:`Scope.statements`.
For static analysis purposes there exists a method called
``nodes_to_execute`` on all nodes and leaves. It's documented in the static
analysis documentation.
"""
import os
import re
from inspect import cleandoc
from itertools import chain
import textwrap
import abc
from jedi._compatibility import (Python3Method, encoding, is_py3, utf8_repr,
literal_eval, unicode)
def _safe_literal_eval(value):
first_two = value[:2].lower()
if first_two[0] == 'f' or first_two in ('fr', 'rf'):
        # literal_eval is not able to resolve f literals. We have to do that
# manually in a later stage
return ''
try:
return literal_eval(value)
except SyntaxError:
# It's possible to create syntax errors with literals like rb'' in
# Python 2. This should not be possible and in that case just return an
# empty string.
# Before Python 3.3 there was a more strict definition in which order
# you could define literals.
return ''
def search_ancestor(node, node_type_or_types):
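    """
    Walks up the parents of ``node`` and returns the first ancestor whose
    ``type`` is in ``node_type_or_types`` (a single type string or a
    tuple/list of them). Returns ``None`` if no ancestor matches.
    """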
if not isinstance(node_type_or_types, (list, tuple)):
node_type_or_types = (node_type_or_types,)
while True:
node = node.parent
if node is None or node.type in node_type_or_types:
return node
class DocstringMixin(object):
__slots__ = ()
@property
def raw_doc(self):
""" Returns a cleaned version of the docstring token. """
if isinstance(self, Module):
node = self.children[0]
elif isinstance(self, ClassOrFunc):
node = self.children[self.children.index(':') + 1]
if node.type == 'suite': # Normally a suite
node = node.children[1] # -> NEWLINE stmt
else: # ExprStmt
simple_stmt = self.parent
c = simple_stmt.parent.children
index = c.index(simple_stmt)
if not index:
return ''
node = c[index - 1]
if node.type == 'simple_stmt':
node = node.children[0]
if node.type == 'string':
            # TODO We have to check next leaves until there are no new
            # leaves anymore that might be part of the docstring. A
            # docstring can also look like this: ``'foo' 'bar'``.
            # Returns a literal cleaned version of the ``Token``.
cleaned = cleandoc(_safe_literal_eval(node.value))
# Since we want the docstr output to be always unicode, just
# force it.
if is_py3 or isinstance(cleaned, unicode):
return cleaned
else:
return unicode(cleaned, 'UTF-8', 'replace')
return ''
class Base(object):
"""
This is just here to have an isinstance check, which is also used on
    evaluate classes. But since they sometimes have a special type of
delegation, it is important for those classes to override this method.
I know that there is a chance to do such things with __instancecheck__, but
since Python 2.5 doesn't support it, I decided to do it this way.
"""
__slots__ = ()
def isinstance(self, *cls):
return isinstance(self, cls)
def get_root_node(self):
scope = self
while scope.parent is not None:
scope = scope.parent
return scope
@Python3Method
def get_parent_until(self, classes=(), reverse=False,
include_current=True):
"""
Searches the parent "chain" until the object is an instance of
        classes. If classes is empty, return the last parent in the chain
        (the one without a parent).
"""
if type(classes) not in (tuple, list):
classes = (classes,)
scope = self if include_current else self.parent
while scope.parent is not None:
# TODO why if classes?
if classes and reverse != scope.isinstance(*classes):
break
scope = scope.parent
return scope
def get_parent_scope(self, include_flows=False):
"""
Returns the underlying scope.
"""
scope = self.parent
while scope is not None:
if include_flows and isinstance(scope, Flow):
return scope
if scope.is_scope():
break
scope = scope.parent
return scope
def get_definition(self):
if self.type in ('newline', 'endmarker'):
            raise ValueError('Cannot get the definition of a newline or endmarker.')
scope = self
while scope.parent is not None:
parent = scope.parent
if scope.isinstance(Node, Leaf) and parent.type != 'simple_stmt':
if scope.type == 'testlist_comp':
try:
if isinstance(scope.children[1], CompFor):
return scope.children[1]
except IndexError:
pass
scope = parent
else:
break
return scope
def assignment_indexes(self):
"""
Returns an array of tuple(int, node) of the indexes that are used in
tuple assignments.
For example if the name is ``y`` in the following code::
x, (y, z) = 2, ''
would result in ``[(1, xyz_node), (0, yz_node)]``.
"""
indexes = []
node = self.parent
compare = self
while node is not None:
if node.type in ('testlist_comp', 'testlist_star_expr', 'exprlist'):
for i, child in enumerate(node.children):
if child == compare:
indexes.insert(0, (int(i / 2), node))
break
else:
raise LookupError("Couldn't find the assignment.")
elif isinstance(node, (ExprStmt, CompFor)):
break
compare = node
node = node.parent
return indexes
def is_scope(self):
# Default is not being a scope. Just inherit from Scope.
return False
@abc.abstractmethod
def nodes_to_execute(self, last_added=False):
raise NotImplementedError()
def get_next_sibling(self):
"""
The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None.
"""
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_previous_sibling(self):
"""
The node/leaf immediately preceding the invocant in their parent's
children list. If the invocant does not have a previous sibling, it is
None.
"""
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_previous_leaf(self):
"""
Returns the previous leaf in the parser tree.
Raises an IndexError if it's the first element.
"""
node = self
while True:
c = node.parent.children
i = c.index(node)
if i == 0:
node = node.parent
if node.parent is None:
raise IndexError('Cannot access the previous element of the first one.')
else:
node = c[i - 1]
break
while True:
try:
node = node.children[-1]
except AttributeError: # A Leaf doesn't have children.
return node
def get_next_leaf(self):
"""
        Returns the next leaf in the parser tree.
Raises an IndexError if it's the last element.
"""
node = self
while True:
c = node.parent.children
i = c.index(node)
if i == len(c) - 1:
node = node.parent
if node.parent is None:
raise IndexError('Cannot access the next element of the last one.')
else:
node = c[i + 1]
break
while True:
try:
node = node.children[0]
except AttributeError: # A Leaf doesn't have children.
return node
class Leaf(Base):
__slots__ = ('value', 'parent', 'line', 'indent', 'prefix')
def __init__(self, value, start_pos, prefix=''):
self.value = value
self.start_pos = start_pos
self.prefix = prefix
self.parent = None
@property
def start_pos(self):
return self.line, self.indent
@start_pos.setter
def start_pos(self, value):
self.line = value[0]
self.indent = value[1]
def get_start_pos_of_prefix(self):
try:
return self.get_previous_leaf().end_pos
except IndexError:
return self.line - self.prefix.count('\n'), 0 # It's the first leaf.
@property
def end_pos(self):
return self.line, self.indent + len(self.value)
def move(self, line_offset):
self.line += line_offset
def first_leaf(self):
return self
def last_leaf(self):
return self
def get_code(self, normalized=False, include_prefix=True):
if normalized:
return self.value
if include_prefix:
return self.prefix + self.value
else:
return self.value
def nodes_to_execute(self, last_added=False):
return []
@utf8_repr
def __repr__(self):
return "<%s: %s>" % (type(self).__name__, self.value)
class LeafWithNewLines(Leaf):
__slots__ = ()
@property
def end_pos(self):
"""
Literals and whitespace end_pos are more complicated than normal
end_pos, because the containing newlines may change the indexes.
"""
lines = self.value.split('\n')
end_pos_line = self.line + len(lines) - 1
# Check for multiline token
if self.line == end_pos_line:
end_pos_indent = self.indent + len(lines[-1])
else:
end_pos_indent = len(lines[-1])
return end_pos_line, end_pos_indent
@utf8_repr
def __repr__(self):
return "<%s: %r>" % (type(self).__name__, self.value)
class EndMarker(Leaf):
__slots__ = ()
type = 'endmarker'
class Newline(LeafWithNewLines):
"""Contains NEWLINE and ENDMARKER tokens."""
__slots__ = ()
type = 'newline'
@utf8_repr
def __repr__(self):
return "<%s: %s>" % (type(self).__name__, repr(self.value))
class Name(Leaf):
"""
A string. Sometimes it is important to know if the string belongs to a name
or not.
"""
type = 'name'
__slots__ = ()
def __str__(self):
return self.value
def __unicode__(self):
return self.value
def __repr__(self):
return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
self.line, self.indent)
def is_definition(self):
if self.parent.type in ('power', 'atom_expr'):
# In `self.x = 3` self is not a definition, but x is.
return False
stmt = self.get_definition()
if stmt.type in ('funcdef', 'classdef', 'file_input', 'param'):
return self == stmt.name
elif stmt.type == 'for_stmt':
return self.start_pos < stmt.children[2].start_pos
elif stmt.type == 'try_stmt':
return self.get_previous_sibling() == 'as'
else:
return stmt.type in ('expr_stmt', 'import_name', 'import_from',
'comp_for', 'with_stmt') \
and self in stmt.get_defined_names()
def nodes_to_execute(self, last_added=False):
if last_added is False:
yield self
class Literal(LeafWithNewLines):
__slots__ = ()
def eval(self):
return _safe_literal_eval(self.value)
class Number(Literal):
type = 'number'
__slots__ = ()
class String(Literal):
type = 'string'
__slots__ = ()
class Operator(Leaf):
type = 'operator'
__slots__ = ()
def __str__(self):
return self.value
def __eq__(self, other):
"""
Make comparisons with strings easy.
Improves the readability of the parser.
"""
if isinstance(other, Operator):
return self is other
else:
return self.value == other
def __ne__(self, other):
"""Python 2 compatibility."""
return self.value != other
def __hash__(self):
return hash(self.value)
class Keyword(Leaf):
type = 'keyword'
__slots__ = ()
def __eq__(self, other):
"""
Make comparisons with strings easy.
Improves the readability of the parser.
"""
if isinstance(other, Keyword):
return self is other
return self.value == other
def __ne__(self, other):
"""Python 2 compatibility."""
return not self.__eq__(other)
def __hash__(self):
return hash(self.value)
class BaseNode(Base):
"""
    The super class for all non-leaf nodes, e.g. Scope, Import and
    Statement. Every inner node of the parser tree inherits from this class.
"""
__slots__ = ('children', 'parent')
type = None
def __init__(self, children):
"""
Initialize :class:`BaseNode`.
        :param children: The child nodes of this node. Their ``parent``
            attributes are set to this node as a side effect.
"""
for c in children:
c.parent = self
self.children = children
self.parent = None
def move(self, line_offset):
"""
Move the Node's start_pos.
"""
for c in self.children:
c.move(line_offset)
@property
def start_pos(self):
return self.children[0].start_pos
def get_start_pos_of_prefix(self):
return self.children[0].get_start_pos_of_prefix()
@property
def end_pos(self):
return self.children[-1].end_pos
def _get_code_for_children(self, children, normalized, include_prefix):
# TODO implement normalized (depending on context).
if include_prefix:
return "".join(c.get_code(normalized) for c in children)
else:
first = children[0].get_code(include_prefix=False)
return first + "".join(c.get_code(normalized) for c in children[1:])
def get_code(self, normalized=False, include_prefix=True):
return self._get_code_for_children(self.children, normalized, include_prefix)
@Python3Method
def name_for_position(self, position):
for c in self.children:
if isinstance(c, Leaf):
if isinstance(c, Name) and c.start_pos <= position <= c.end_pos:
return c
else:
result = c.name_for_position(position)
if result is not None:
return result
return None
def get_leaf_for_position(self, position, include_prefixes=False):
def binary_search(lower, upper):
if lower == upper:
element = self.children[lower]
if not include_prefixes and position < element.start_pos:
# We're on a prefix.
return None
# In case we have prefixes, a leaf always matches
try:
return element.get_leaf_for_position(position, include_prefixes)
except AttributeError:
return element
index = int((lower + upper) / 2)
element = self.children[index]
if position <= element.end_pos:
return binary_search(lower, index)
else:
return binary_search(index + 1, upper)
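        # Illustrative example: in a module parsed from 'import os',
        # get_leaf_for_position((1, 8)) drills down to the <Name: os> leaf.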
if not ((1, 0) <= position <= self.children[-1].end_pos):
raise ValueError('Please provide a position that exists within this node.')
return binary_search(0, len(self.children) - 1)
@Python3Method
def get_statement_for_position(self, pos):
for c in self.children:
if c.start_pos <= pos <= c.end_pos:
if c.type not in ('decorated', 'simple_stmt', 'suite') \
and not isinstance(c, (Flow, ClassOrFunc)):
return c
else:
try:
return c.get_statement_for_position(pos)
except AttributeError:
pass # Must be a non-scope
return None
def first_leaf(self):
try:
return self.children[0].first_leaf()
except AttributeError:
return self.children[0]
def get_next_leaf(self):
"""
Raises an IndexError if it's the last node. (Would be the module)
"""
c = self.parent.children
index = c.index(self)
if index == len(c) - 1:
            # Last child: continue the search in the parent node.
            return self.parent.get_next_leaf()
else:
return c[index + 1]
def last_leaf(self):
return self.children[-1].last_leaf()
def get_following_comment_same_line(self):
"""
        Returns (as a string) any comment that appears on the same line,
        after the node, including the ``#``.
"""
try:
if self.isinstance(ForStmt):
whitespace = self.children[5].first_leaf().prefix
elif self.isinstance(WithStmt):
whitespace = self.children[3].first_leaf().prefix
else:
whitespace = self.last_leaf().get_next_leaf().prefix
except AttributeError:
return None
except ValueError:
# TODO in some particular cases, the tree doesn't seem to be linked
# correctly
return None
if "#" not in whitespace:
return None
comment = whitespace[whitespace.index("#"):]
if "\r" in comment:
comment = comment[:comment.index("\r")]
if "\n" in comment:
comment = comment[:comment.index("\n")]
return comment
@utf8_repr
def __repr__(self):
code = self.get_code().replace('\n', ' ').strip()
if not is_py3:
code = code.encode(encoding, 'replace')
return "<%s: %s@%s,%s>" % \
(type(self).__name__, code, self.start_pos[0], self.start_pos[1])
class Node(BaseNode):
"""Concrete implementation for interior nodes."""
__slots__ = ('type',)
_IGNORE_EXECUTE_NODES = set([
'suite', 'subscriptlist', 'subscript', 'simple_stmt', 'sliceop',
'testlist_comp', 'dictorsetmaker', 'trailer', 'decorators',
'decorated', 'arglist', 'argument', 'exprlist', 'testlist',
'testlist_safe', 'testlist1'
])
def __init__(self, type, children):
"""
        Initializer.
        Takes a type constant (a symbol number >= 256) and a sequence of
        child nodes.
        As a side effect, the parent pointers of the children are updated.
"""
super(Node, self).__init__(children)
self.type = type
def nodes_to_execute(self, last_added=False):
"""
For static analysis.
"""
result = []
if self.type not in Node._IGNORE_EXECUTE_NODES and not last_added:
result.append(self)
last_added = True
for child in self.children:
result += child.nodes_to_execute(last_added)
return result
def __repr__(self):
return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children)
class ErrorNode(BaseNode):
"""
    A node that contains code which could not be parsed into a valid node;
    produced by the error recovery of :class:`ParserWithRecovery`.
"""
__slots__ = ()
type = 'error_node'
def nodes_to_execute(self, last_added=False):
return []
class ErrorLeaf(LeafWithNewLines):
"""
    A leaf for a token that could not be parsed at its position; the type it
    was tokenized as is kept in :attr:`original_type`.
"""
    __slots__ = ('original_type',)
type = 'error_leaf'
def __init__(self, original_type, value, start_pos, prefix=''):
super(ErrorLeaf, self).__init__(value, start_pos, prefix)
self.original_type = original_type
def __repr__(self):
return "<%s: %s:%s, %s)>" % \
(type(self).__name__, self.original_type, repr(self.value), self.start_pos)
class Scope(BaseNode, DocstringMixin):
"""
    Super class for the parser tree, which represents the state of a Python
    text file.
    A Scope manages and owns its subscopes, which are classes and functions, as
    well as variables and imports. It is used to access the structure of Python
    files.
    :param children: The child nodes of the scope.
"""
__slots__ = ()
def __init__(self, children):
super(Scope, self).__init__(children)
@property
def returns(self):
# Needed here for fast_parser, because the fast_parser splits and
# returns will be in "normal" modules.
return self._search_in_scope(ReturnStmt)
@property
def subscopes(self):
return self._search_in_scope(Scope)
@property
def flows(self):
return self._search_in_scope(Flow)
@property
def imports(self):
return self._search_in_scope(Import)
@Python3Method
def _search_in_scope(self, typ):
def scan(children):
elements = []
for element in children:
if isinstance(element, typ):
elements.append(element)
if element.type in ('suite', 'simple_stmt', 'decorated') \
or isinstance(element, Flow):
elements += scan(element.children)
return elements
return scan(self.children)
@property
def statements(self):
return self._search_in_scope((ExprStmt, KeywordStatement))
def is_scope(self):
return True
def __repr__(self):
try:
name = self.path
except AttributeError:
try:
name = self.name
except AttributeError:
name = self.command
return "<%s: %s@%s-%s>" % (type(self).__name__, name,
self.start_pos[0], self.end_pos[0])
def walk(self):
yield self
for s in self.subscopes:
for scope in s.walk():
yield scope
for r in self.statements:
while isinstance(r, Flow):
for scope in r.walk():
yield scope
r = r.next
class Module(Scope):
"""
The top scope, which is always a module.
Depending on the underlying parser this may be a full module or just a part
of a module.
"""
__slots__ = ('path', 'used_names', '_name')
type = 'file_input'
def __init__(self, children):
"""
        Initialize :class:`Module`.
        The file path is not passed here; :attr:`path` is set later by the
        parser.
"""
super(Module, self).__init__(children)
self.path = None # Set later.
@property
def name(self):
""" This is used for the goto functions. """
if self.path is None:
string = '' # no path -> empty name
else:
sep = (re.escape(os.path.sep),) * 2
r = re.search(r'([^%s]*?)(%s__init__)?(\.py|\.so)?$' % sep, self.path)
# Remove PEP 3149 names
            string = re.sub(r'\.[a-z]+-\d{2}[mud]{0,3}$', '', r.group(1))
# Positions are not real, but a module starts at (1, 0)
p = (1, 0)
name = Name(string, p)
name.parent = self
return name
@property
def has_explicit_absolute_import(self):
"""
Checks if imports in this module are explicitly absolute, i.e. there
is a ``__future__`` import.
"""
# TODO this is a strange scan and not fully correct. I think Python's
# parser does it in a different way and scans for the first
# statement/import with a tokenizer (to check for syntax changes like
# the future print statement).
for imp in self.imports:
if imp.type == 'import_from' and imp.level == 0:
for path in imp.paths():
if [str(name) for name in path] == ['__future__', 'absolute_import']:
return True
return False
def nodes_to_execute(self, last_added=False):
        # Collect the executable nodes of all children.
result = []
for child in self.children:
result += child.nodes_to_execute()
return result
class Decorator(BaseNode):
type = 'decorator'
__slots__ = ()
def nodes_to_execute(self, last_added=False):
if self.children[-2] == ')':
node = self.children[-3]
if node != '(':
return node.nodes_to_execute()
return []
class ClassOrFunc(Scope):
__slots__ = ()
@property
def name(self):
return self.children[1]
def get_decorators(self):
decorated = self.parent
if decorated.type == 'decorated':
if decorated.children[0].type == 'decorators':
return decorated.children[0].children
else:
return decorated.children[:1]
else:
return []
class Class(ClassOrFunc):
"""
    Used to store the parsed contents of a Python class.
    :param children: The child nodes of the ``classdef`` node, starting with
        the ``class`` keyword.
"""
type = 'classdef'
__slots__ = ()
def __init__(self, children):
super(Class, self).__init__(children)
def get_super_arglist(self):
if self.children[2] != '(': # Has no parentheses
return None
else:
if self.children[3] == ')': # Empty parentheses
return None
else:
return self.children[3]
@property
def doc(self):
"""
Return a document string including call signature of __init__.
"""
docstr = self.raw_doc
for sub in self.subscopes:
if str(sub.name) == '__init__':
return '%s\n\n%s' % (
sub.get_call_signature(func_name=self.name), docstr)
return docstr
def nodes_to_execute(self, last_added=False):
# Yield itself, class needs to be executed for decorator checks.
yield self
# Super arguments.
arglist = self.get_super_arglist()
try:
children = arglist.children
except AttributeError:
if arglist is not None:
for node_to_execute in arglist.nodes_to_execute():
yield node_to_execute
else:
for argument in children:
if argument.type == 'argument':
# metaclass= or list comprehension or */**
raise NotImplementedError('Metaclasses not implemented')
else:
for node_to_execute in argument.nodes_to_execute():
yield node_to_execute
# care for the class suite:
for node in self.children[self.children.index(':'):]:
# This could be easier without the fast parser. But we need to find
# the position of the colon, because everything after it can be a
# part of the class, not just its suite.
for node_to_execute in node.nodes_to_execute():
yield node_to_execute
def _create_params(parent, argslist_list):
"""
    `argslist_list` is a list that can contain an argslist as a first item,
    but often does not. It's basically the items between the parameter
    brackets (which is at most one item).
This function modifies the parser structure. It generates `Param` objects
from the normal ast. Those param objects do not exist in a normal ast, but
make the evaluation of the ast tree so much easier.
You could also say that this function replaces the argslist node with a
list of Param objects.
"""
def check_python2_nested_param(node):
"""
Python 2 allows params to look like ``def x(a, (b, c))``, which is
basically a way of unpacking tuples in params. Python 3 has ditched
this behavior. Jedi currently just ignores those constructs.
"""
return node.type == 'tfpdef' and node.children[0] == '('
try:
first = argslist_list[0]
except IndexError:
return []
if first.type in ('name', 'tfpdef'):
if check_python2_nested_param(first):
return [first]
else:
return [Param([first], parent)]
elif first == '*':
return [first]
else: # argslist is a `typedargslist` or a `varargslist`.
children = first.children
new_children = []
start = 0
# Start with offset 1, because the end is higher.
for end, child in enumerate(children + [None], 1):
if child is None or child == ',':
param_children = children[start:end]
if param_children: # Could as well be comma and then end.
if check_python2_nested_param(param_children[0]):
new_children += param_children
elif param_children[0] == '*' and param_children[1] == ',':
new_children += param_children
else:
new_children.append(Param(param_children, parent))
start = end
return new_children
class Function(ClassOrFunc):
"""
    Used to store the parsed contents of a Python function.
    Children:
      0) <Keyword: def>
      1) <Name>
      2) parameter list (including open-paren and close-paren <Operator>s)
      3) <Operator: :>, or <Operator: ->> followed by the return annotation
         and then the colon
      -1) Node() representing the function body (always the last child)
"""
type = 'funcdef'
def __init__(self, children):
super(Function, self).__init__(children)
parameters = self.children[2] # After `def foo`
parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1])
@property
def params(self):
return [p for p in self.children[2].children if p.type == 'param']
@property
def name(self):
return self.children[1] # First token after `def`
@property
def yields(self):
# TODO This is incorrect, yields are also possible in a statement.
return self._search_in_scope(YieldExpr)
def is_generator(self):
return bool(self.yields)
def annotation(self):
try:
if self.children[3] == "->":
return self.children[4]
assert self.children[3] == ":"
return None
except IndexError:
return None
def get_call_signature(self, width=72, func_name=None):
"""
Generate call signature of this function.
:param width: Fold lines if a line is longer than this value.
:type width: int
:arg func_name: Override function name when given.
:type func_name: str
:rtype: str
"""
func_name = func_name or self.name
code = unicode(func_name) + self._get_paramlist_code()
return '\n'.join(textwrap.wrap(code, width))
def _get_paramlist_code(self):
return self.children[2].get_code()
@property
def doc(self):
""" Return a document string including call signature. """
docstr = self.raw_doc
return '%s\n\n%s' % (self.get_call_signature(), docstr)
def nodes_to_execute(self, last_added=False):
        # Yield itself, functions need to be executed for decorator checks.
yield self
for param in self.params:
if param.default is not None:
yield param.default
# care for the function suite:
for node in self.children[4:]:
# This could be easier without the fast parser. The fast parser
# allows that the 4th position is empty or that there's even a
# fifth element (another function/class). So just scan everything
# after colon.
for node_to_execute in node.nodes_to_execute():
yield node_to_execute
class Lambda(Function):
"""
Lambdas are basically trimmed functions, so give it the same interface.
Children:
0) <Keyword: lambda>
*) <Param x> for each argument x
-2) <Operator: :>
-1) Node() representing body
"""
type = 'lambda'
__slots__ = ()
def __init__(self, children):
# We don't want to call the Function constructor, call its parent.
super(Function, self).__init__(children)
lst = self.children[1:-2] # Everything between `lambda` and the `:` operator is a parameter.
self.children[1:-2] = _create_params(self, lst)
@property
def name(self):
# Borrow the position of the <Keyword: lambda> AST node.
return Name('<lambda>', self.children[0].start_pos)
def _get_paramlist_code(self):
return '(' + ''.join(param.get_code() for param in self.params).strip() + ')'
@property
def params(self):
return self.children[1:-2]
def is_generator(self):
return False
def annotation(self):
# lambda functions do not support annotations
return None
@property
def yields(self):
return []
def nodes_to_execute(self, last_added=False):
for param in self.params:
if param.default is not None:
yield param.default
# Care for the lambda test (last child):
for node_to_execute in self.children[-1].nodes_to_execute():
yield node_to_execute
def __repr__(self):
return "<%s@%s>" % (self.__class__.__name__, self.start_pos)
class Flow(BaseNode):
__slots__ = ()
FLOW_KEYWORDS = (
'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
)
def nodes_to_execute(self, last_added=False):
for child in self.children:
for node_to_execute in child.nodes_to_execute():
yield node_to_execute
def get_branch_keyword(self, node):
start_pos = node.start_pos
if not (self.start_pos < start_pos <= self.end_pos):
raise ValueError('The node is not part of the flow.')
keyword = None
for i, child in enumerate(self.children):
if start_pos < child.start_pos:
return keyword
first_leaf = child.first_leaf()
if first_leaf in self.FLOW_KEYWORDS:
keyword = first_leaf
        return keyword
class IfStmt(Flow):
type = 'if_stmt'
__slots__ = ()
def check_nodes(self):
"""
        Returns all the `test` nodes that are defined as ``x`` here::
            if x:
                pass
            elif x:
                pass
"""
for i, c in enumerate(self.children):
if c in ('elif', 'if'):
yield self.children[i + 1]
def node_in_which_check_node(self, node):
"""
        Returns the check node (see function above) that a node is contained
        in. However, if the node is in the check node itself and not in its
        suite, return None.
"""
start_pos = node.start_pos
for check_node in reversed(list(self.check_nodes())):
if check_node.start_pos < start_pos:
if start_pos < check_node.end_pos:
return None
# In this case the node is within the check_node itself,
# not in the suite
else:
return check_node
def node_after_else(self, node):
"""
Checks if a node is defined after `else`.
"""
for c in self.children:
if c == 'else':
if node.start_pos > c.start_pos:
return True
else:
return False
class WhileStmt(Flow):
type = 'while_stmt'
__slots__ = ()
class ForStmt(Flow):
type = 'for_stmt'
__slots__ = ()
def get_input_node(self):
"""
Returns the input node ``y`` from: ``for x in y:``.
"""
return self.children[3]
def defines_one_name(self):
"""
Returns True if only one name is returned: ``for x in y``.
Returns False if the for loop is more complicated: ``for x, z in y``.
:returns: bool
"""
return self.children[1].type == 'name'
class TryStmt(Flow):
type = 'try_stmt'
__slots__ = ()
def except_clauses(self):
"""
Returns the ``test`` nodes found in ``except_clause`` nodes.
Returns ``[None]`` for except clauses without an exception given.
"""
for node in self.children:
if node.type == 'except_clause':
yield node.children[1]
elif node == 'except':
yield None
def nodes_to_execute(self, last_added=False):
result = []
for child in self.children[2::3]:
result += child.nodes_to_execute()
for child in self.children[0::3]:
if child.type == 'except_clause':
# Add the test node and ignore the `as NAME` definition.
result += child.children[1].nodes_to_execute()
return result
class WithStmt(Flow):
type = 'with_stmt'
__slots__ = ()
def get_defined_names(self):
names = []
for with_item in self.children[1:-2:2]:
# Check with items for 'as' names.
if with_item.type == 'with_item':
names += _defined_names(with_item.children[2])
return names
def node_from_name(self, name):
node = name
while True:
node = node.parent
if node.type == 'with_item':
return node.children[0]
def nodes_to_execute(self, last_added=False):
result = []
for child in self.children[1::2]:
if child.type == 'with_item':
# Just ignore the `as EXPR` part - at least for now, because
# most times it's just a name.
child = child.children[0]
result += child.nodes_to_execute()
return result
class Import(BaseNode):
__slots__ = ()
def path_for_name(self, name):
try:
# The name may be an alias. If it is, just map it back to the name.
name = self.aliases()[name]
except KeyError:
pass
for path in self.paths():
if name in path:
return path[:path.index(name) + 1]
raise ValueError('Name should be defined in the import itself')
def is_nested(self):
        return False  # By default; subclasses may override this behavior.
def is_star_import(self):
return self.children[-1] == '*'
def nodes_to_execute(self, last_added=False):
"""
        `nodes_to_execute` works a bit differently for imports, because the
        names themselves cannot be resolved directly (except on the import
        itself).
"""
# TODO couldn't we return the names? Would be nicer.
return [self]
class ImportFrom(Import):
type = 'import_from'
__slots__ = ()
def get_defined_names(self):
return [alias or name for name, alias in self._as_name_tuples()]
def aliases(self):
"""Mapping from alias to its corresponding name."""
return dict((alias, name) for name, alias in self._as_name_tuples()
if alias is not None)
def get_from_names(self):
for n in self.children[1:]:
if n not in ('.', '...'):
break
if n.type == 'dotted_name': # from x.y import
return n.children[::2]
elif n == 'import': # from . import
return []
else: # from x import
return [n]
@property
def level(self):
"""The level parameter of ``__import__``."""
level = 0
for n in self.children[1:]:
if n in ('.', '...'):
level += len(n.value)
else:
break
return level
def _as_name_tuples(self):
last = self.children[-1]
if last == ')':
last = self.children[-2]
elif last == '*':
return # No names defined directly.
if last.type == 'import_as_names':
as_names = last.children[::2]
else:
as_names = [last]
for as_name in as_names:
if as_name.type == 'name':
yield as_name, None
else:
yield as_name.children[::2] # yields x, y -> ``x as y``
def star_import_name(self):
"""
The last name defined in a star import.
"""
return self.paths()[-1][-1]
def paths(self):
"""
The import paths defined in an import statement. Typically an array
like this: ``[<Name: datetime>, <Name: date>]``.
"""
dotted = self.get_from_names()
if self.children[-1] == '*':
return [dotted]
return [dotted + [name] for name, alias in self._as_name_tuples()]
class ImportName(Import):
"""For ``import_name`` nodes. Covers normal imports without ``from``."""
type = 'import_name'
__slots__ = ()
def get_defined_names(self):
return [alias or path[0] for path, alias in self._dotted_as_names()]
@property
def level(self):
"""The level parameter of ``__import__``."""
return 0 # Obviously 0 for imports without from.
def paths(self):
return [path for path, alias in self._dotted_as_names()]
def _dotted_as_names(self):
"""Generator of (list(path), alias) where alias may be None."""
dotted_as_names = self.children[1]
if dotted_as_names.type == 'dotted_as_names':
as_names = dotted_as_names.children[::2]
else:
as_names = [dotted_as_names]
for as_name in as_names:
if as_name.type == 'dotted_as_name':
alias = as_name.children[2]
as_name = as_name.children[0]
else:
alias = None
if as_name.type == 'name':
yield [as_name], alias
else:
# dotted_names
yield as_name.children[::2], alias
def is_nested(self):
"""
This checks for the special case of nested imports, without aliases and
from statement::
import foo.bar
"""
return [1 for path, alias in self._dotted_as_names()
if alias is None and len(path) > 1]
def aliases(self):
return dict((alias, path[-1]) for path, alias in self._dotted_as_names()
if alias is not None)
class KeywordStatement(BaseNode):
"""
    For the following statements: `assert`, `del`, `global`, `nonlocal`,
    `raise`, `return` and `yield`.
    `pass`, `continue` and `break` are not in there, because they are just
    simple keywords and the parser reduces them to a keyword token.
"""
__slots__ = ()
@property
def type(self):
"""
Keyword statements start with the keyword and end with `_stmt`. You can
crosscheck this with the Python grammar.
"""
return '%s_stmt' % self.keyword
@property
def keyword(self):
return self.children[0].value
def nodes_to_execute(self, last_added=False):
result = []
for child in self.children:
result += child.nodes_to_execute()
return result
class AssertStmt(KeywordStatement):
__slots__ = ()
def assertion(self):
return self.children[1]
class GlobalStmt(KeywordStatement):
__slots__ = ()
def get_defined_names(self):
return []
def get_global_names(self):
return self.children[1::2]
def nodes_to_execute(self, last_added=False):
"""
        The global keyword allows defining any name, even one that does not
        exist yet.
"""
return []
class ReturnStmt(KeywordStatement):
__slots__ = ()
class YieldExpr(BaseNode):
__slots__ = ()
@property
def type(self):
return 'yield_expr'
def nodes_to_execute(self, last_added=False):
if len(self.children) > 1:
return self.children[1].nodes_to_execute()
else:
return []
def _defined_names(current):
"""
A helper function to find the defined names in statements, for loops and
list comprehensions.
"""
names = []
if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist'):
for child in current.children[::2]:
names += _defined_names(child)
elif current.type in ('atom', 'star_expr'):
names += _defined_names(current.children[1])
elif current.type in ('power', 'atom_expr'):
if current.children[-2] != '**': # Just if there's no operation
trailer = current.children[-1]
if trailer.children[0] == '.':
names.append(trailer.children[1])
else:
names.append(current)
return names
class ExprStmt(BaseNode, DocstringMixin):
type = 'expr_stmt'
__slots__ = ()
def get_defined_names(self):
names = []
if self.children[1].type == 'annassign':
names = _defined_names(self.children[0])
return list(chain.from_iterable(
_defined_names(self.children[i])
for i in range(0, len(self.children) - 2, 2)
if '=' in self.children[i + 1].value)
) + names
def get_rhs(self):
"""Returns the right-hand-side of the equals."""
return self.children[-1]
def first_operation(self):
"""
Returns `+=`, `=`, etc or None if there is no operation.
"""
try:
return self.children[1]
except IndexError:
return None
def nodes_to_execute(self, last_added=False):
# I think evaluating the statement (and possibly returned arrays),
# should be enough for static analysis.
result = [self]
for child in self.children:
result += child.nodes_to_execute(last_added=True)
return result
class Param(BaseNode):
"""
    A helper class that makes business logic with params much easier. The
    Python grammar defines no ``param`` node; it defines parameters in a
    different way that is not really suited to working with them.
"""
type = 'param'
def __init__(self, children, parent):
super(Param, self).__init__(children)
self.parent = parent
for child in children:
child.parent = self
@property
def stars(self):
first = self.children[0]
if first in ('*', '**'):
return len(first.value)
return 0
@property
def default(self):
try:
return self.children[int(self.children[0] in ('*', '**')) + 2]
except IndexError:
return None
def annotation(self):
tfpdef = self._tfpdef()
if tfpdef.type == 'tfpdef':
assert tfpdef.children[1] == ":"
assert len(tfpdef.children) == 3
annotation = tfpdef.children[2]
return annotation
else:
return None
def _tfpdef(self):
"""
tfpdef: see grammar.txt.
"""
offset = int(self.children[0] in ('*', '**'))
return self.children[offset]
@property
def name(self):
if self._tfpdef().type == 'tfpdef':
return self._tfpdef().children[0]
else:
return self._tfpdef()
@property
def position_nr(self):
return self.parent.children.index(self) - 1
def get_parent_function(self):
return search_ancestor(self, ('funcdef', 'lambda'))
def __repr__(self):
default = '' if self.default is None else '=%s' % self.default.get_code()
return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)
def get_description(self):
children = self.children
if children[-1] == ',':
children = children[:-1]
return self._get_code_for_children(children, False, False)
class CompFor(BaseNode):
type = 'comp_for'
__slots__ = ()
def get_comp_fors(self):
yield self
last = self.children[-1]
while True:
if isinstance(last, CompFor):
yield last
elif not last.type == 'comp_if':
break
last = last.children[-1]
def is_scope(self):
return True
def get_defined_names(self):
return _defined_names(self.children[1])
def nodes_to_execute(self, last_added=False):
last = self.children[-1]
if last.type == 'comp_if':
for node in last.children[-1].nodes_to_execute():
yield node
last = self.children[-2]
elif last.type == 'comp_for':
for node in last.nodes_to_execute():
yield node
last = self.children[-2]
for node in last.nodes_to_execute():
yield node
| tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/jedi/parser/tree.py | Python | bsd-3-clause | 51,159 |
#!/usr/bin/env python
import os
import sys
import re
import copy
filename = sys.argv[1]
with open(filename, 'r') as infile:
lines = infile.read().splitlines()
match = re.search(r'contours_(\w+)_(\d)(\d)\.', filename)
organtech = match.group(1)
N = int(match.group(2))
M = int(match.group(3))
class Contour(object):
class DataPoint(object):
def __init__(self, text, x, y, z):
self.text = text
self.x = x
self.y = y
self.z = z
def __eq__(self, x):
return self.text == x.text
def __init__(self, label):
self.label = label
self.points = []
def add_point(self, *args, **kwargs):
self.points.append(Contour.DataPoint(*args, **kwargs))
headpat = re.compile(r'^#\s*[Cc]ontour\s+\d+\s*,\s*[Ll]abel\s*:\s*([\-+\.0-9]+)\s*$')
datapat = re.compile(r'^\s*([\-+0-9\.]+)\s+([\-+0-9\.]+)\s+([\-+0-9\.]+)\s*$')
contour = None
contours = []
for line in lines:
if headpat.match(line):
label = headpat.match(line).group(1)
if contour is not None:
contours.append(contour)
contour = Contour(label)
elif datapat.match(line):
x = datapat.match(line).group(1)
y = datapat.match(line).group(2)
z = datapat.match(line).group(3)
if contour is None:
contour = Contour(z)
elif contour.label != z:
contours.append(contour)
contour = Contour(z)
contour.add_point(line, x, y, z)
elif line == "":
if contour is not None:
contours.append(contour)
contour = None
elif line.startswith('#'):
continue
if contour is not None:
contours.append(contour)
# Filter out any contour objects that have zero points.
contours = [x for x in contours if len(x.points) > 0]
# We have to try to glue the contours together into continuous lines.
old_contours = contours
contours = []
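# Illustrative sketch of the merge rules below: if contour A ends where
# contour B begins (A[-1] == B[0]), the merged contour is simply A + B;
# head-to-head or tail-to-tail matches require reversing one of the point
# lists first, which is what the four cases below handle.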
for co in old_contours:
merged = False
for cn in contours:
if co.points[0] == cn.points[0]:
            # reversed() returns an iterator; make it a list before concatenating.
            cn.points = list(reversed(co.points)) + cn.points
merged = True
continue
elif co.points[0] == cn.points[-1]:
cn.points = cn.points + co.points
merged = True
continue
elif co.points[-1] == cn.points[0]:
cn.points = co.points + cn.points
merged = True
continue
elif co.points[-1] == cn.points[-1]:
            cn.points = cn.points + list(reversed(co.points))
merged = True
continue
if not merged:
contours.append(co)
labelpoints = []
k = 16
low = False
for c in contours:
if len(c.points) < 2*k+1:
continue
if low:
m = int(float(len(c.points)) * 2./3.)
else:
m = int(len(c.points) / 3)
# The following is for adjusting individual contour labels.
# N indicates the plot column left to right 1 .. 4.
# M indicates the plot row top to bottom 1 .. 4.
# Here we adjust the distance along the curve m (in the range 0..1) to place
# the label.
if organtech == "cion_bladder":
if c.points[0].z == "1.75":
if N == 4 and M == 1:
m = int(float(len(c.points)) * 2.0/5.)
elif N == 1 and M == 2:
m = int(float(len(c.points)) * 0.14)
elif N == 2 and M == 2:
m = int(float(len(c.points)) * 0.115)
elif N == 3 and M == 2:
m = int(float(len(c.points)) * 0.19)
elif N == 4 and M == 2:
m = int(float(len(c.points)) * 0.19)
elif N == 1 and M == 3:
m = int(float(len(c.points)) * 0.21)
elif N == 2 and M == 3:
m = int(float(len(c.points)) * 0.17)
elif N in [3, 4] and M == 3:
m = int(float(len(c.points)) * 0.31)
elif N == 1 and M == 4:
m = int(float(len(c.points)) * 0.2)
elif N == 2 and M == 4:
m = int(float(len(c.points)) * 0.185)
elif N == 3 and M == 4:
m = int(float(len(c.points)) * 0.22)
elif N == 4 and M == 4:
m = int(float(len(c.points)) * 0.43)
else:
m = int(float(len(c.points)) * 1.5/8.)
if c.points[0].z == "1.5":
if M == 3:
m = int(float(len(c.points)) * 1./2.)
if c.points[0].z == "2":
if M == 4:
if N == 1:
m = int(float(len(c.points)) * 0.165)
elif N == 4:
m = int(float(len(c.points)) * 0.14)
else:
m = int(float(len(c.points)) * 0.15)
else:
if N in [1, 2]:
m = int(float(len(c.points)) * 1.75/8.)
elif N == 3:
m = int(float(len(c.points)) * 1.65/8.)
else:
m = int(float(len(c.points)) * 1.5/8.)
elif organtech == "cion_rectum":
if float(c.points[0].z) > 1:
m = int(float(len(c.points)) * 0.0)
elif organtech == "proton_bladder":
if c.points[0].z == "2.5":
if M == 1 and N == 2:
m = int(float(len(c.points)) * 0.8)
elif M == 1 and N == 3:
m = int(float(len(c.points)) * 0.63)
elif M == 1 and N == 4:
m = int(float(len(c.points)) * 0.57)
elif M == 2 and N == 3:
m = int(float(len(c.points)) * 0.6)
elif M == 2 and N == 4:
m = int(float(len(c.points)) * 0.43)
elif M == 3 and N == 2:
m = int(float(len(c.points)) * 0.0)
elif M == 3 and N == 3:
m = int(float(len(c.points)) * 0.43)
elif M == 3 and N == 4:
m = int(float(len(c.points)) * 0.32)
elif M == 4 and N in [3, 4]:
m = int(float(len(c.points)) * 0.32)
elif organtech == "proton_rectum":
if float(c.points[0].z) > 1.5:
m = int(float(len(c.points)) * 0.0)
x = float(c.points[m].x)
y = float(c.points[m].y)
if x < 0.03 or 0.775 < x or y < 0.013 or 0.049 < y:
continue
low = not low
labelpoints.append( copy.deepcopy(c.points[m]) )
for n in xrange(m-k,m+k):
c.points[n].text = ""
c.points = c.points[0:m-k] + [c.points[m]] + c.points[m+k:]
filename = sys.argv[2]
with open(filename, 'w') as outfile:
for c in contours:
outfile.write("\n");
for p in c.points:
outfile.write(p.text + "\n");
filename = sys.argv[3]
with open(filename, 'w') as outfile:
for p in labelpoints:
# Change label for the "1" contour to "1.0".
if '.' not in p.z and int(p.z) == 1:
p.text = "{0} {1} {2:.1f}".format(p.x, p.y, float(p.z))
        # Adjust the middle position of some of the labels:
if organtech == "cion_bladder":
if p.z == "1.75" and M == 2 and N == 1:
p.text = "{0} {1} {2}".format(float(p.x) - 0.005, p.y, p.z)
elif p.z == "1.75" and M == 2 and N == 2:
p.text = "{0} {1} {2}".format(float(p.x) - 0.0075, p.y, p.z)
elif p.z == "1.75" and M == 2 and N in [3, 4]:
p.text = "{0} {1} {2}".format(float(p.x) - 0.01, p.y, p.z)
elif organtech == "cion_rectum":
if p.z == "0.75" and M == 2:
p.text = "{0} {1} {2}".format(float(p.x) + 0.01, p.y, p.z)
if p.z == "0.75" and M in [3, 4]:
p.text = "{0} {1} {2}".format(float(p.x) + 0.015, p.y, p.z)
elif p.z == "1":
p.text = "{0} {1} {2:.1f}".format(float(p.x) + 0.01, p.y, float(p.z))
elif organtech == "proton_bladder":
if p.z == "2.5" and M == 1 and N == 2:
p.text = "{0} {1} {2}".format(float(p.x) + 0.018, p.y, p.z)
elif p.z == "2.5" and M == 1 and N == 3:
p.text = "{0} {1} {2}".format(float(p.x) + 0.012, p.y, p.z)
elif p.z == "2.5" and M == 1 and N == 4:
p.text = "{0} {1} {2}".format(float(p.x) + 0.01, p.y, p.z)
elif p.z == "2.5" and M == 2 and N == 3:
p.text = "{0} {1} {2}".format(float(p.x) + 0.014, p.y, p.z)
elif p.z == "2.5" and M == 2 and N == 4:
p.text = "{0} {1} {2}".format(float(p.x) + 0.01, p.y, p.z)
elif p.z == "2.5" and M == 3 and N == 3:
p.text = "{0} {1} {2}".format(float(p.x) + 0.015, p.y, p.z)
elif p.z == "2.5" and M == 3 and N == 4:
p.text = "{0} {1} {2}".format(float(p.x) + 0.008, p.y, p.z)
elif p.z == "2.5" and M == 4 and N in [1]:
p.text = "{0} {1} {2}".format(float(p.x) + 0.024, p.y, p.z)
elif p.z == "2.5" and M == 4 and N in [2, 3]:
p.text = "{0} {1} {2}".format(float(p.x) + 0.015, p.y, p.z)
elif p.z == "2.25" and M == 1:
p.text = "{0} {1} {2}".format(float(p.x) + 0.01, p.y, p.z)
elif p.z == "2.25" and M == 2 and N == 4:
p.text = "{0} {1} {2}".format(float(p.x) + 0.01, p.y, p.z)
elif organtech == "proton_rectum":
if p.z == "1.5":
p.text = "{0} {1} {2}".format(float(p.x) + 0.01, p.y, p.z)
outfile.write(p.text + "\n");
| Particle-Therapy-Group-Bergen/PTPB | runs/relativeRiskAnalysis/makeContourLabels.py | Python | gpl-3.0 | 9,462 |
class Solution:
    # @return the sum of three integers in num that is closest to target
def threeSumClosest(self, num, target):
sorted_num = sorted(num)
distance = float('inf')
sum = 0
for i in range(len(num)-2):
a = sorted_num[i]
if a-target > distance:
break
if i > 0 and sorted_num[i-1] == a:
continue
j = i+1
k = len(num)-1
            while j < k:
tmp = a + sorted_num[j] + sorted_num[k]
d = tmp-target
if abs(d) < distance:
distance = abs(d)
sum = tmp
if d > 0:
k = k - 1
                    while k >= 0 and sorted_num[k] == sorted_num[k+1]:
k = k - 1
elif d < 0:
j = j+1
                    while j < len(num) and sorted_num[j] == sorted_num[j-1]:
j = j + 1
else:
return sum
return sum
nums = [-13,10,11,-3,8,11,-4,8,12,-13,5,-6,-4,-2,12,11,7,-7,-3,10,
12,13,-3,-2,6,-1,14,7,-13,8,14,-10,-4,10,-6,11,-2,-3,4,-13,
0,-14,-3,3,-9,-6,-9,13,-6,3,1,-9,-6,13,-4,-15,-11,-12,7,-9,
3,-2,-12,6,-15,-10,2,-2,-6,13,1,9,14,5,-11,-10,14,-5,11,-6,6,
-3,-8,-15,-13,-4,7,13,-1,-9,11,-13,-4,-15,9,-4,12,-4,1,-9,-5,
9,8,-14,-1,4,14]
nums1 = [-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6]
num2 = [1, 1, 1, 1]
solution = Solution()
print(solution.threeSumClosest(num2, 0))
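# Illustrative expected output: with num2 = [1, 1, 1, 1] and target 0, the
# closest achievable sum is 1 + 1 + 1 = 3, so this prints 3.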
| lutianming/leetcode | 3sum_closest.py | Python | mit | 1,566 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Person
# Returns members of Congress and U.S. Presidents since the founding of the nation.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Person(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Person Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Person, self).__init__(temboo_session, '/Library/GovTrack/Person')
def new_input_set(self):
return PersonInputSet()
def _make_result_set(self, result, path):
return PersonResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return PersonChoreographyExecution(session, exec_id, path)
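    # Illustrative usage sketch (assumes a valid TembooSession created
    # elsewhere; execute_with_results is the standard Choreography call):
    #
    #     choreo = Person(session)
    #     inputs = choreo.new_input_set()
    #     inputs.set_LastName('Lincoln')
    #     results = choreo.execute_with_results(inputs)
    #     print(results.get_Response())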
class PersonInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Person
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma separated list of fields to return in the response. Use double-underscores to span relationships (e.g. person__firstname).)
"""
super(PersonInputSet, self)._set_input('Fields', value)
def set_Gender(self, value):
"""
Set the value of the Gender input for this Choreo. ((optional, string) The person's gender (male or female). For historical data, gender is sometimes not specified. Filter operators allowed. Sortable.)
"""
super(PersonInputSet, self)._set_input('Gender', value)
def set_LastName(self, value):
"""
Set the value of the LastName input for this Choreo. ((optional, string) The representative's last name. Filter operators allowed. Sortable.)
"""
super(PersonInputSet, self)._set_input('LastName', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) Results are paged 100 per call by default. Set the limit input to a high value to get all of the results at once.)
"""
super(PersonInputSet, self)._set_input('Limit', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Offset the results by the number given, useful for paging through results.)
"""
super(PersonInputSet, self)._set_input('Offset', value)
def set_PersonID(self, value):
"""
Set the value of the PersonID input for this Choreo. ((optional, integer) The ID number for a person to retrieve. When using this input, all other filter parameters are ignored, and a single record is returned.)
"""
super(PersonInputSet, self)._set_input('PersonID', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((conditional, string) Filters according to a full-text search on the object.)
"""
super(PersonInputSet, self)._set_input('Query', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(PersonInputSet, self)._set_input('ResponseFormat', value)
def set_Sort(self, value):
"""
Set the value of the Sort input for this Choreo. ((optional, string) You can order the results using fieldname (ascending) or -fieldname (descending) where "fieldname" is one of the variables that is listed as 'Sortable' in the description. Ex: '-lastname')
"""
super(PersonInputSet, self)._set_input('Sort', value)
class PersonResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Person Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from GovTrack.)
"""
return self._output.get('Response', None)
class PersonChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return PersonResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/GovTrack/Person.py | Python | apache-2.0 | 5,414 |
import difflib
from googkit.lib.i18n import _
class Help(object):
"""A class that prints a help message."""
"""A threshold of a similarity ratio for command candidates"""
CANDIDATE_RATIO_THRSHOLD = 0.5
def __init__(self, tree, argument):
"""Creates a help message builder with the command tree and the Argument instance.
"""
self._tree = tree
self._argument = argument
self._correct_commands = tree.right_commands(argument.commands)
self._available_commands = tree.available_commands(self._correct_commands)
def _is_valid_commands(self):
commands = self._argument.commands
return (not commands) or (commands == self._correct_commands)
def _print_usage(self):
commands_mark = _('<commands>') if self._available_commands else ''
if self._correct_commands:
print(_('Usage: googkit {cmd} {cmds_mark}').format(
cmd=' '.join(self._correct_commands),
cmds_mark=commands_mark))
else:
print(_('Usage: googkit {cmds_mark}').format(
cmds_mark=commands_mark))
@classmethod
def similarity(cls, src_command):
"""Returns a function that returns a similarity ratio for the specified
command.
"""
def ratio(command):
return difflib.SequenceMatcher(None, src_command, command).ratio()
return ratio
@classmethod
def candidates(cls, available_commands, src_command):
"""Returns command candidates for the specified available commands and
the source command.
"""
if src_command is None:
return available_commands
ratio = Help.similarity(src_command)
candidates = sorted(available_commands, key=ratio, reverse=True)
return [x for x in candidates if ratio(x) >= Help.CANDIDATE_RATIO_THRSHOLD]
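    # Illustrative example: candidates(['build', 'deploy'], 'biuld') ranks
    # 'build' first (SequenceMatcher gives it a ratio of 0.8, above the 0.5
    # threshold), while 'deploy' falls below the threshold and is dropped.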
def _print_available_commands(self, command):
if not self._available_commands:
return
print('')
if self._is_valid_commands():
print(_('Available commands:'))
commands = self._available_commands
else:
candidates = Help.candidates(self._available_commands, command)
if len(candidates) == 0:
print(_('Available commands:'))
commands = self._available_commands
elif len(candidates) == 1:
print(_('Did you mean this?'))
commands = candidates
else:
print(_('Did you mean one of these?'))
commands = candidates
for name in commands:
print(' ' + name)
def _print_available_options(self):
cls = self._tree.command_class(self._argument.commands)
if not cls:
return
supported_options = cls.supported_options()
if not supported_options:
return
print('')
print(_('Available options:'))
for name in supported_options:
print(' ' + name)
def print_help(self):
"""Prints a help message.
"""
last_command = None if not self._argument.commands else self._argument.commands[-1]
if not self._is_valid_commands():
print(_('Invalid command: {cmd}').format(cmd=last_command))
print('')
self._print_usage()
self._print_available_commands(last_command)
self._print_available_options()
| googkit/googkit | googkit/lib/help.py | Python | mit | 3,492 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import bisect
import random
import numpy as np
from ..io import DataIter, DataBatch, DataDesc
from .. import ndarray
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n', start_label=0):
"""Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\\n'
Key for invalid token. Use '\\n' for end
of sentence by default.
start_label : int
lowest index.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
"""
idx = start_label
if vocab is None:
vocab = {invalid_key: invalid_label}
new_vocab = True
else:
new_vocab = False
res = []
for sent in sentences:
coded = []
for word in sent:
if word not in vocab:
                assert new_vocab, "Unknown token %s" % word
if idx == invalid_label:
idx += 1
vocab[word] = idx
idx += 1
coded.append(vocab[word])
res.append(coded)
return res, vocab
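# Usage sketch (illustrative, not part of the original module):
#
#     sents = [['hello', 'world'], ['hello', 'mxnet']]
#     coded, vocab = encode_sentences(sents)
#     # coded -> [[0, 1], [0, 2]]
#     # vocab -> {'\n': -1, 'hello': 0, 'world': 1, 'mxnet': 2}
#     # Passing the returned vocab back in encodes new data consistently;
#     # unknown tokens then trigger the assertion above.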
class BucketSentenceIter(DataIter):
"""Simple bucketing iterator for language model.
The label at each sequence step is the following token
in the sequence.
Parameters
----------
sentences : list of list of int
Encoded sentences.
batch_size : int
Batch size of the data.
    invalid_label : int, optional
        Index for the invalid label, e.g. <end-of-sentence>. The default is -1.
dtype : str, optional
Data type of the encoding. The default data type is 'float32'.
buckets : list of int, optional
Size of the data buckets. Automatically generated if None.
data_name : str, optional
Name of the data. The default name is 'data'.
label_name : str, optional
Name of the label. The default name is 'softmax_label'.
layout : str, optional
Format of data and label. 'NT' means (batch_size, length)
and 'TN' means (length, batch_size).
"""
def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
data_name='data', label_name='softmax_label', dtype='float32',
layout='NT'):
super(BucketSentenceIter, self).__init__()
if not buckets:
buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
if j >= batch_size]
buckets.sort()
ndiscard = 0
self.data = [[] for _ in buckets]
for i, sent in enumerate(sentences):
buck = bisect.bisect_left(buckets, len(sent))
if buck == len(buckets):
ndiscard += 1
continue
buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
buff[:len(sent)] = sent
self.data[buck].append(buff)
        self.data = [np.asarray(i, dtype=dtype) for i in self.data if i]
        if ndiscard > 0:
            print("WARNING: discarded %d sentences longer than the largest bucket." % ndiscard)
self.batch_size = batch_size
self.buckets = buckets
self.data_name = data_name
self.label_name = label_name
self.dtype = dtype
self.invalid_label = invalid_label
self.nddata = []
self.ndlabel = []
self.major_axis = layout.find('N')
self.layout = layout
self.default_bucket_key = max(buckets)
if self.major_axis == 0:
self.provide_data = [DataDesc(
name=self.data_name, shape=(batch_size, self.default_bucket_key),
layout=self.layout)]
self.provide_label = [DataDesc(
name=self.label_name, shape=(batch_size, self.default_bucket_key),
layout=self.layout)]
elif self.major_axis == 1:
self.provide_data = [DataDesc(
name=self.data_name, shape=(self.default_bucket_key, batch_size),
layout=self.layout)]
self.provide_label = [DataDesc(
name=self.label_name, shape=(self.default_bucket_key, batch_size),
layout=self.layout)]
else:
raise ValueError("Invalid layout %s: Must by NT (batch major) or TN (time major)")
self.idx = []
for i, buck in enumerate(self.data):
self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
self.curr_idx = 0
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck)
self.nddata = []
self.ndlabel = []
for buck in self.data:
label = np.empty_like(buck)
label[:, :-1] = buck[:, 1:]
label[:, -1] = self.invalid_label
self.nddata.append(ndarray.array(buck, dtype=self.dtype))
self.ndlabel.append(ndarray.array(label, dtype=self.dtype))
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
if self.major_axis == 1:
data = self.nddata[i][j:j+self.batch_size].T
label = self.ndlabel[i][j:j+self.batch_size].T
else:
data = self.nddata[i][j:j+self.batch_size]
label = self.ndlabel[i][j:j+self.batch_size]
return DataBatch([data], [label], pad=0,
bucket_key=self.buckets[i],
provide_data=[DataDesc(
name=self.data_name, shape=data.shape,
layout=self.layout)],
provide_label=[DataDesc(
name=self.label_name, shape=label.shape,
layout=self.layout)])
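# Usage sketch (illustrative): encode sentences first, then let the iterator
# bucket them by length and yield padded batches.
#
#     coded, vocab = encode_sentences(tokenized_sentences)
#     data_iter = BucketSentenceIter(coded, batch_size=32, invalid_label=-1)
#     for batch in data_iter:
#         pass  # batch.data[0] has shape (32, bucket_len) in the 'NT' layout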
|
luoyetx/mxnet
|
python/mxnet/rnn/io.py
|
Python
|
apache-2.0
| 7,353
|
"""
Views related to content libraries.
A content library is a structure containing XBlocks which can be re-used in
multiple courses.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseForbidden, HttpResponseNotAllowed
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator, LibraryUsageLocator
from contentstore.utils import add_instructor, reverse_library_url
from contentstore.views.item import create_xblock_info
from course_creators.views import get_course_creator_status
from edxmako.shortcuts import render_to_response
from student.auth import (
STUDIO_EDIT_ROLES,
STUDIO_VIEW_USERS,
get_user_permissions,
has_studio_read_access,
has_studio_write_access
)
from student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole
from util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import DuplicateCourseError
from openedx.eduscaled.cms.utils import can_create_library
from .component import CONTAINER_TEMPLATES, get_component_templates
from .user import user_with_role
__all__ = ['library_handler', 'manage_library_users']
log = logging.getLogger(__name__)
LIBRARIES_ENABLED = settings.FEATURES.get('ENABLE_CONTENT_LIBRARIES', False)
def get_library_creator_status(user):
"""
Helper method for returning the library creation status for a particular user,
taking into account the value LIBRARIES_ENABLED.
"""
if not LIBRARIES_ENABLED:
return False
elif user.is_staff or can_create_library(user):
return True
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
return get_course_creator_status(user) == 'granted'
else:
return True
@login_required
@ensure_csrf_cookie
@require_http_methods(('GET', 'POST'))
def library_handler(request, library_key_string=None):
"""
RESTful interface to most content library related functionality.
"""
if not LIBRARIES_ENABLED:
log.exception("Attempted to use the content library API when the libraries feature is disabled.")
raise Http404 # Should never happen because we test the feature in urls.py also
if not get_library_creator_status(request.user):
if not request.user.is_staff:
return HttpResponseForbidden()
if library_key_string is not None and request.method == 'POST':
return HttpResponseNotAllowed(("POST",))
if request.method == 'POST':
return _create_library(request)
# request method is get, since only GET and POST are allowed by @require_http_methods(('GET', 'POST'))
if library_key_string:
return _display_library(library_key_string, request)
return _list_libraries(request)
def _display_library(library_key_string, request):
"""
Displays single library
"""
library_key = CourseKey.from_string(library_key_string)
if not isinstance(library_key, LibraryLocator):
log.exception("Non-library key passed to content libraries API.") # Should never happen due to url regex
raise Http404 # This is not a library
if not has_studio_read_access(request.user, library_key):
log.exception(
u"User %s tried to access library %s without permission",
request.user.username, unicode(library_key)
)
raise PermissionDenied()
library = modulestore().get_library(library_key)
if library is None:
log.exception(u"Library %s not found", unicode(library_key))
raise Http404
response_format = 'html'
if (
request.GET.get('format', 'html') == 'json' or
'application/json' in request.META.get('HTTP_ACCEPT', 'text/html')
):
response_format = 'json'
return library_blocks_view(library, request.user, response_format)
def _list_libraries(request):
"""
List all accessible libraries
"""
lib_info = [
{
"display_name": lib.display_name,
"library_key": unicode(lib.location.library_key),
}
for lib in modulestore().get_libraries()
if has_studio_read_access(request.user, lib.location.library_key)
]
return JsonResponse(lib_info)
@expect_json
def _create_library(request):
"""
Helper method for creating a new library.
"""
display_name = None
try:
display_name = request.json['display_name']
org = request.json['org']
library = request.json.get('number', None)
if library is None:
library = request.json['library']
store = modulestore()
with store.default_store(ModuleStoreEnum.Type.split):
new_lib = store.create_library(
org=org,
library=library,
user_id=request.user.id,
fields={"display_name": display_name},
)
# Give the user admin ("Instructor") role for this library:
add_instructor(new_lib.location.library_key, request.user, request.user)
except KeyError as error:
log.exception("Unable to create library - missing required JSON key.")
return JsonResponseBadRequest({
"ErrMsg": _("Unable to create library - missing required field '{field}'").format(field=error.message)
})
except InvalidKeyError as error:
log.exception("Unable to create library - invalid key.")
return JsonResponseBadRequest({
"ErrMsg": _("Unable to create library '{name}'.\n\n{err}").format(name=display_name, err=error.message)
})
except DuplicateCourseError:
log.exception("Unable to create library - one already exists with the same key.")
return JsonResponseBadRequest({
'ErrMsg': _(
'There is already a library defined with the same '
'organization and library code. Please '
'change your library code so that it is unique within your organization.'
)
})
lib_key_str = unicode(new_lib.location.library_key)
return JsonResponse({
'url': reverse_library_url('library_handler', lib_key_str),
'library_key': lib_key_str,
})
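# Example request body handled by _create_library (hedged: field names are
# taken from the parsing logic above, the values are made up):
#
#     {"display_name": "My Library", "org": "OrgX", "number": "LIB101"}
#
# On success the response carries the new key, e.g.
# {"library_key": "library-v1:OrgX+LIB101", "url": ...}.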
def library_blocks_view(library, user, response_format):
"""
The main view of a course's content library.
Shows all the XBlocks in the library, and allows adding/editing/deleting
them.
Can be called with response_format="json" to get a JSON-formatted list of
the XBlocks in the library along with library metadata.
Assumes that read permissions have been checked before calling this.
"""
assert isinstance(library.location.library_key, LibraryLocator)
assert isinstance(library.location, LibraryUsageLocator)
children = library.children
if response_format == "json":
# The JSON response for this request is short and sweet:
prev_version = library.runtime.course_entry.structure['previous_version']
return JsonResponse({
"display_name": library.display_name,
"library_id": unicode(library.location.library_key),
"version": unicode(library.runtime.course_entry.course_key.version),
"previous_version": unicode(prev_version) if prev_version else None,
"blocks": [unicode(x) for x in children],
})
can_edit = has_studio_write_access(user, library.location.library_key)
xblock_info = create_xblock_info(library, include_ancestor_info=False, graders=[])
component_templates = get_component_templates(library, library=True) if can_edit else []
return render_to_response('library.html', {
'can_edit': can_edit,
'context_library': library,
'component_templates': component_templates,
'xblock_info': xblock_info,
'templates': CONTAINER_TEMPLATES,
})
def manage_library_users(request, library_key_string):
"""
Studio UI for editing the users within a library.
Uses the /course_team/:library_key/:user_email/ REST API to make changes.
"""
library_key = CourseKey.from_string(library_key_string)
if not isinstance(library_key, LibraryLocator):
raise Http404 # This is not a library
user_perms = get_user_permissions(request.user, library_key)
if not user_perms & STUDIO_VIEW_USERS:
raise PermissionDenied()
library = modulestore().get_library(library_key)
if library is None:
raise Http404
# Segment all the users explicitly associated with this library, ensuring each user only has one role listed:
instructors = set(CourseInstructorRole(library_key).users_with_role())
staff = set(CourseStaffRole(library_key).users_with_role()) - instructors
users = set(LibraryUserRole(library_key).users_with_role()) - instructors - staff
formatted_users = []
for user in instructors:
formatted_users.append(user_with_role(user, 'instructor'))
for user in staff:
formatted_users.append(user_with_role(user, 'staff'))
for user in users:
formatted_users.append(user_with_role(user, 'library_user'))
return render_to_response('manage_users_lib.html', {
'context_library': library,
'users': formatted_users,
'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES),
'library_key': unicode(library_key),
'lib_users_url': reverse_library_url('manage_library_users', library_key_string),
'show_children_previews': library.show_children_previews
})
|
miptliot/edx-platform
|
cms/djangoapps/contentstore/views/library.py
|
Python
|
agpl-3.0
| 10,006
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Library information."""
from __future__ import absolute_import
import sys
import os
from .environment import get_vta_hw_path
def _get_lib_name(lib_name):
"""Get lib name with extension
Returns
-------
lib_name_ext : str
Name of VTA shared library with extension
Parameters
------------
lib_name : str
Name of VTA shared library
"""
if sys.platform.startswith("win32"):
return lib_name + ".dll"
if sys.platform.startswith("darwin"):
return lib_name + ".dylib"
return lib_name + ".so"
def find_libvta(lib_vta, optional=False):
"""Find VTA Chisel-based library
Returns
-------
lib_found : str
Library path
Parameters
------------
lib_vta : str
Name of VTA shared library
optional : bool
Enable error check
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
tvm_library_path = os.environ.get("TVM_LIBRARY_PATH", None)
if tvm_library_path is None:
tvm_library_path = os.path.join(
curr_path,
os.pardir,
os.pardir,
os.pardir,
"build",
)
lib_search = [tvm_library_path, os.path.join(get_vta_hw_path(), "build")]
lib_name = _get_lib_name(lib_vta)
lib_path = [os.path.join(x, lib_name) for x in lib_search]
lib_found = [x for x in lib_path if os.path.exists(x)]
    if not lib_found and not optional:
        raise RuntimeError(
            "Cannot find the VTA library.\nList of candidates:\n" + "\n".join(lib_path)
        )
return lib_found
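# Usage sketch (the library name is illustrative):
#
#     libs = find_libvta("libvta_hw", optional=True)
#     if libs:
#         print("found VTA library at", libs[0])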
|
dmlc/tvm
|
vta/python/vta/libinfo.py
|
Python
|
apache-2.0
| 2,410
|
# Copyright (c) 2007, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest
from ..api import HasTraits, Int, Range, Str, TraitError
class WithFloatRange(HasTraits):
r = Range(0.0, 100.0)
r_copied_on_change = Str
_changed_handler_calls = Int
def _r_changed(self, old, new):
self._changed_handler_calls += 1
self.r_copied_on_change = str(self.r)
if (self.r % 10) > 0:
self.r += 10 - (self.r % 10)
class WithLargeIntRange(HasTraits):
r = Range(0, 1000)
r_copied_on_change = Str
_changed_handler_calls = Int
def _r_changed(self, old, new):
self._changed_handler_calls += 1
self.r_copied_on_change = str(self.r)
if self.r > 100:
self.r = 0
class WithDynamicRange(HasTraits):
low = Int(0)
high = Int(10)
value = Int(3)
r = Range(value='value', low='low', high='high', exclude_high=True)
def _r_changed(self, old, new):
self._changed_handler_calls += 1
class RangeTestCase(unittest.TestCase):
def test_non_ui_events(self):
obj = WithFloatRange()
obj._changed_handler_calls = 0
obj.r = 10
self.assertEqual(1, obj._changed_handler_calls)
obj._changed_handler_calls = 0
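        # Assigning 34.56 fires _r_changed, which rounds r up to 40; that
        # re-assignment fires the handler a second time, hence 2 calls.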
obj.r = 34.56
self.assertEqual(obj._changed_handler_calls, 2)
self.assertEqual(obj.r, 40)
def test_non_ui_int_events(self):
# Even though the range is configured for 0..1000, the handler resets
# the value to 0 when it exceeds 100.
obj = WithLargeIntRange()
obj._changed_handler_calls = 0
obj.r = 10
self.assertEqual(obj._changed_handler_calls, 1)
self.assertEqual(obj.r, 10)
obj.r = 100
self.assertEqual(obj._changed_handler_calls, 2)
self.assertEqual(obj.r, 100)
obj.r = 101
self.assertEqual(obj._changed_handler_calls, 4)
self.assertEqual(obj.r, 0)
def test_dynamic_events(self):
obj = WithDynamicRange()
obj._changed_handler_calls = 0
obj.r = 5
self.assertEqual(obj._changed_handler_calls, 1)
self.assertEqual(obj.r, 5)
with self.assertRaises(TraitError):
obj.r = obj.high
self.assertEqual(obj.r, 5)
|
burnpanck/traits
|
traits/tests/test_range.py
|
Python
|
bsd-3-clause
| 2,646
|
#!/usr/bin/python3
# Sir helps you to do automated TLS certificate rollovers, including TLSA updates.
# Copyright (C) 2015 Skruppy <skruppy@onmars.eu>
#
# This file is part of Sir.
#
# Sir is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sir is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sir. If not, see <http://www.gnu.org/licenses/>.
from sir.sir import Sir
if __name__ == '__main__':
Sir().main()
|
Skrupellos/sir
|
sir.py
|
Python
|
gpl-3.0
| 862
|
"""
Start function names with a lowercase letter.
This rule applies only to Unix / Linux C/C++ code.
bool CheckSth() { <== Violation. The function name starts with uppercase C.
return false;
}
bool _CheckSth() { <== Violation. The function name starts with uppercase C.
return false;
}
== Good ==
bool isSth() { <== OK.
return true;
}
"""
from nsiqunittest.nsiqcppstyle_unittestbase import *
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, fullName, decl, contextStack, context):
t = lexer.GetCurToken()
value = t.value
if value.startswith("_"):
value = value[1:]
if value.startswith("~"):
value = value[1:]
if Search("^[A-Z]", value):
if IsConstuctor(value, fullName, contextStack.SigPeek()):
return
if IsOperator(value):
return
        nsiqcppstyle_reporter.Error(
            t, __name__, "Do not start function name (%s) with an uppercase letter" % fullName)
ruleManager.AddFunctionNameRule(RunRule)
##########################################################################
# Unit Test
##########################################################################
class testRule(nct):
def setUpRule(self):
ruleManager.AddFunctionNameRule(RunRule)
def test1(self):
self.Analyze("test/thisFile.c",
"""
bool CanHave() {
}""")
self.ExpectError(__name__)
def test2(self):
self.Analyze("test/thisFile.c",
"""
bool CTEST:CanHave() {
}""")
self.ExpectError(__name__)
def test3(self):
self.Analyze("test/thisFile.c",
"""
extern bool CTEST:canHave() {
}""")
self.ExpectSuccess(__name__)
def test4(self):
self.Analyze("test/thisFile.c",
"""
extern int CTEST:_CanHave() {
}""")
self.ExpectError(__name__)
def test5(self):
self.Analyze("test/thisFile.c",
"""
class AA {
extern int ~IsIt();
}""")
self.ExpectError(__name__)
def test6(self):
self.Analyze("test/thisFile.c",
"""
class K {
extern bool CTEST:canHave();
}""")
self.ExpectSuccess(__name__)
def test7(self):
self.Analyze("test/thisFile.c",
"""
class K {
a = new EE();
}""")
self.ExpectSuccess(__name__)
def test8(self):
self.Analyze("test/thisFile.c",
"""
class K {
int Hello()
int EE();
}""")
self.ExpectError(__name__)
def test9(self):
self.Analyze("test/thisFile.c",
"""
class K {
int K()
int ~K()
int ee();
}""")
self.ExpectSuccess(__name__)
def test10(self):
self.Analyze("test/thisFile.c",
"""
#define TT KK() {\
}}
""")
self.ExpectSuccess(__name__)
def test11(self):
self.Analyze("test/thisFile.c",
"""
void KK::KK() {
}
""")
self.ExpectSuccess(__name__)
def test12(self):
self.Analyze("test/thisFile.c",
"""
void KK::~KK() {
}
""")
self.ExpectSuccess(__name__)
def test13(self):
self.Analyze("test/thisFile.c",
"""
TEST()
BLOCK1()
BLOCK2()
BLOCK3()
""")
self.ExpectSuccess(__name__)
def test14(self):
self.Analyze("test/thisFile.c",
"""
void KK() {
}
""")
self.ExpectError(__name__)
|
kunaltyagi/nsiqcppstyle
|
rules/RULE_3_3_A_start_function_name_with_lowercase_unix.py
|
Python
|
gpl-2.0
| 3,754
|
#!/usr/bin/python
#
# Build Duktape website. Must be run with cwd in the website/ directory.
#
import os
import sys
import time
import datetime
import shutil
import re
import tempfile
import atexit
import md5
from bs4 import BeautifulSoup, Tag
colorize = True
fancy_stack = True
remove_fixme = True
testcase_refs = False
list_tags = False
fancy_releaselog = True
dt_now = datetime.datetime.utcnow()
def readFile(x):
f = open(x, 'rb')
data = f.read()
f.close()
return data
def htmlEscape(x):
res = ''
esc = '&<>'
for c in x:
if ord(c) >= 0x20 and ord(c) <= 0x7e and c not in esc:
res += c
else:
res += '&#x%04x;' % ord(c)
return res
def getAutodeleteTempname():
tmp = tempfile.mktemp(suffix='duktape-website')
def f():
os.remove(tmp)
atexit.register(f)
return tmp
# also escapes text automatically
def sourceHighlight(x, sourceLang):
tmp1 = getAutodeleteTempname()
tmp2 = getAutodeleteTempname()
f = open(tmp1, 'wb') # FIXME
f.write(x)
f.close()
# FIXME: safer execution
os.system('source-highlight -s %s -c highlight.css --no-doc <"%s" >"%s"' % \
(sourceLang, tmp1, tmp2))
f = open(tmp2, 'rb')
res = f.read()
f.close()
return res
def rst2Html(filename):
tmp1 = getAutodeleteTempname()
# FIXME: safer execution
os.system('rst2html "%s" >"%s"' % \
(filename, tmp1))
f = open(tmp1, 'rb')
res = f.read()
f.close()
return res
def getFileMd5(filename):
if not os.path.exists(filename):
return None
f = open(filename, 'rb')
d = f.read()
f.close()
return md5.md5(d).digest().encode('hex')
def stripNewline(x):
if len(x) > 0 and x[-1] == '\n':
return x[:-1]
return x
def validateAndParseHtml(data):
# first parse as xml to get errors out
ign_soup = BeautifulSoup(data, 'xml')
# then parse as lenient html, no xml tags etc
soup = BeautifulSoup(data)
return soup
re_stack_line = re.compile(r'^(\[[^\x5d]+\])(?:\s+->\s+(\[[^\x5d]+\]))?(?:\s+(.*?))?\s*$')
def renderFancyStack(inp_line):
# Support various notations here:
#
# [ a b c ]
# [ a b c ] -> [ d e f ]
# [ a b c ] -> [ d e f ] (if foo)
#
m = re_stack_line.match(inp_line)
#print(inp_line)
assert(m is not None)
stacks = [ m.group(1) ]
if m.group(2) is not None:
stacks.append(m.group(2))
res = []
res.append('<div class="stack-wrapper">')
for idx, stk in enumerate(stacks):
if idx > 0:
res.append('<span class="arrow"><b>→</b></span>')
res.append('<span class="stack">')
for part in stk.split(' '):
part = part.strip()
elem_classes = []
elem_classes.append('elem') #FIXME
if len(part) > 0 and part[-1] == '!':
part = part[:-1]
elem_classes.append('active')
elif len(part) > 0 and part[-1] == '*':
part = part[:-1]
elem_classes.append('referred')
elif len(part) > 0 and part[-1] == '?':
part = part[:-1]
elem_classes.append('ghost')
text = part
# FIXME: detect special constants like "true", "null", etc?
if text in [ 'undefined', 'null', 'true', 'false', 'NaN' ] or \
(len(text) > 0 and text[0] == '"' and text[-1] == '"'):
elem_classes.append('literal')
# FIXME: inline elements for reduced size?
# The stack elements use a classless markup to minimize result
# HTML size. HTML inline elements are used to denote different
# kinds of elements; the elements should be reasonable for text
# browsers so a limited set can be used.
use_inline = False
if part == '':
continue
if part == '[':
#res.append('<em>[</em>')
res.append('<span class="cap">[</span>')
continue
if part == ']':
#res.append('<em>]</em>')
res.append('<span class="cap">]</span>')
continue
if part == '...':
text = '. . .'
elem_classes.append('ellipsis')
else:
text = part
if 'ellipsis' in elem_classes and use_inline:
res.append('<i>' + htmlEscape(text) + '</i>')
elif 'active' in elem_classes and use_inline:
res.append('<b>' + htmlEscape(text) + '</b>')
else:
res.append('<span class="' + ' '.join(elem_classes) + '">' + htmlEscape(text) + '</span>')
res.append('</span>')
# FIXME: pretty badly styled now
if m.group(3) is not None:
res.append('<span class="stack-comment">' + htmlEscape(m.group(3)) + '</span>')
res.append('</div>')
return ' '.join(res) + '\n' # stack is a one-liner; spaces are for text browser rendering
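# Example (hedged): renderFancyStack('[ a b! ] -> [ c ]') yields a single
# <div class="stack-wrapper"> line in which 'b' gets class "elem active"
# (from the trailing '!' marker) and an arrow span separates the two stacks.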
def parseApiDoc(filename):
f = open(filename, 'rb')
parts = {}
state = None
for line in f.readlines():
line = stripNewline(line)
if line.startswith('='):
state = line[1:]
elif state is not None:
if not parts.has_key(state):
parts[state] = []
parts[state].append(line)
else:
if line != '':
raise Exception('unparsed non-empty line: %r' % line)
else:
# ignore
pass
f.close()
# remove leading and trailing empty lines
for k in parts:
p = parts[k]
while len(p) > 0 and p[0] == '':
p.pop(0)
while len(p) > 0 and p[-1] == '':
p.pop()
return parts
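# Input sketch for parseApiDoc() (hedged; inferred from the parser above,
# 'duk_example' is a made-up call name):
#
#     =proto
#     void duk_example(duk_context *ctx);
#
#     =summary
#     <p>One-paragraph description.</p>
#
# Each '=name' line opens a section; subsequent lines accumulate into
# parts['name'] until the next '=' line, and blank edges are trimmed.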
def processApiDoc(parts, funcname, testrefs, used_tags):
res = []
# the 'hidechar' span is to allow browser search without showing the char
res.append('<h1 id="%s"><a href="#%s"><span class="hidechar">.</span>%s()</a></h1>' % (funcname, funcname, funcname))
if parts.has_key('proto'):
p = parts['proto']
res.append('<h2>Prototype</h2>')
res.append('<pre class="c-code">')
for i in p:
res.append(htmlEscape(i))
res.append('</pre>')
res.append('')
else:
pass
if parts.has_key('stack'):
p = parts['stack']
res.append('<h2>Stack</h2>')
for line in p:
res.append('<pre class="stack">' + \
'%s' % htmlEscape(line) + \
'</pre>')
res.append('')
else:
res.append('<h2>Stack</h2>')
res.append('<p>No effect.</p>')
res.append('')
if parts.has_key('summary'):
p = parts['summary']
res.append('<h2>Summary</h2>')
# If text contains a '<p>', assume it is raw HTML; otherwise
# assume it is a single paragraph (with no markup) and generate
# paragraph tags, escaping into HTML
raw_html = False
for i in p:
if '<p>' in i:
raw_html = True
if raw_html:
for i in p:
res.append(i)
else:
res.append('<p>')
for i in p:
res.append(htmlEscape(i))
res.append('</p>')
res.append('')
if parts.has_key('example'):
p = parts['example']
res.append('<h2>Example</h2>')
res.append('<pre class="c-code">')
for i in p:
res.append(htmlEscape(i))
res.append('</pre>')
res.append('')
if parts.has_key('seealso'):
p = parts['seealso']
res.append('<h2>See also</h2>')
res.append('<ul>')
for i in p:
res.append('<li><a href="#%s">%s</a></li>' % (htmlEscape(i), htmlEscape(i)))
res.append('</ul>')
if testcase_refs:
res.append('<h2>Related test cases</h2>')
if testrefs.has_key(funcname):
res.append('<ul>')
for i in testrefs[funcname]:
res.append('<li>%s</li>' % htmlEscape(i))
res.append('</ul>')
else:
res.append('<p>None.</p>')
if not testrefs.has_key(funcname):
res.append('<div class="fixme">This API call has no test cases.</div>')
if list_tags and parts.has_key('tags'):
# FIXME: placeholder
res.append('<h2>Tags</h2>')
res.append('<p>')
p = parts['tags']
for idx, val in enumerate(p):
if idx > 0:
res.append(' ')
res.append(htmlEscape(val))
res.append('</p>')
res.append('')
if parts.has_key('fixme'):
p = parts['fixme']
res.append('<div class="fixme">')
for i in p:
res.append(htmlEscape(i))
res.append('</div>')
res.append('')
return res
def processRawDoc(filename):
f = open(filename, 'rb')
res = []
for line in f.readlines():
line = stripNewline(line)
res.append(line)
f.close()
res.append('')
return res
def transformColorizeCode(soup, cssClass, sourceLang):
for elem in soup.select('pre.' + cssClass):
input_str = elem.string
if len(input_str) > 0 and input_str[0] == '\n':
# hack for leading empty line
input_str = input_str[1:]
colorized = sourceHighlight(input_str, sourceLang)
# source-highlight generates <pre><tt>...</tt></pre>, get rid of <tt>
new_elem = BeautifulSoup(colorized).tt # XXX: parse just a fragment - how?
new_elem.name = 'pre'
new_elem['class'] = cssClass
elem.replace_with(new_elem)
def transformFancyStacks(soup):
for elem in soup.select('pre.stack'):
input_str = elem.string
if len(input_str) > 0 and input_str[0] == '\n':
# hack for leading empty line
input_str = input_str[1:]
new_elem = BeautifulSoup(renderFancyStack(input_str)).div # XXX: fragment?
elem.replace_with(new_elem)
def transformRemoveClass(soup, cssClass):
for elem in soup.select('.' + cssClass):
elem.extract()
def transformReadIncludes(soup, includeDir):
for elem in soup.select('pre'):
if not elem.has_key('include'):
continue
filename = elem['include']
del elem['include']
f = open(os.path.join(includeDir, filename), 'rb')
elem.string = f.read()
f.close()
def transformVersionNumber(soup, verstr):
for elem in soup.select('.duktape-version'):
elem.replaceWith(verstr)
def transformCurrentDate(soup):
curr_date = '%04d-%02d-%02d' % (dt_now.year, dt_now.month, dt_now.day)
for elem in soup.select('.current-date'):
elem.replaceWith(curr_date)
def transformAddHrBeforeH1(soup):
for elem in soup.select('h1'):
elem.insert_before(soup.new_tag('hr'))
# Add automatic anchors so that a basename from an element with an explicit
# ID is appended with dotted number(s). Note that headings do not actually
# nest in the document, so this is now based on document order traversal and
# keeping track of counts of headings at different levels, and the active
# explicit IDs at each level.
def transformAddAutoAnchorsNumbered(soup):
level_counts = [ 0, 0, 0, 0, 0, 0 ] # h1, h2, h3, h4, h5, h6
level_ids = [ None, None, None, None, None, None ] # explicit IDs
hdr_tags = { 'h1': 0, 'h2': 1, 'h3': 2, 'h4': 3, 'h5': 4, 'h6': 5 }
changes = []
def _proc(root, state):
idx = hdr_tags.get(root.name, None)
if idx is None:
return
# bump count at matching level and zero lower levels
level_counts[idx] += 1
for i in xrange(idx + 1, 6):
level_counts[i] = 0
# set explicit ID for current level
if root.has_key('id'):
level_ids[idx] = root['id']
return
# no explicit ID at current level, clear it
level_ids[idx] = None
# figure out an automatic ID: closest explicit ID + dotted
# numbers to current level
parts = []
for i in xrange(idx, -1, -1): # idx, idx-1, ..., 0
if level_ids[i] is not None:
parts.append(level_ids[i])
break
parts.append(str(level_counts[i]))
if i == 0:
parts.append('doc') # if no ID in path, use e.g. 'doc.1.2'
parts.reverse()
auto_id = '.'.join(parts)
# avoid mutation: record changes to be made first
# (adding 'id' would be OK, but this is more flexible
# if explicit anchors are added instead / in addition
# to 'id' attributes)
changes.append((root, auto_id))
def _rec(root, state):
if not isinstance(root, Tag):
return
_proc(root, state)
for elem in root.children:
_rec(elem, state)
_rec(soup.select('body')[0], {})
for elem, auto_id in changes:
elem['id'] = auto_id
# Add automatic anchors where section headings are used to autogenerate
# suitable names. This does not work very well: there are many subsections
# with the name "Example" or "Limitations", for instance. Prepending the
# parent name (or rather names of all the parents) would create very long
# names.
def transformAddAutoAnchorsNamed(soup):
hdr_tags = [ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6' ]
ids = {}
def findAutoName(txt):
# simple name sanitation, not very well thought out; goal is to get
# nice web-like anchor names from whatever titles are present
txt = txt.strip().lower()
if len(txt) > 1 and txt[0] == '.':
txt = txt[1:] # leading dot convention for API section names
txt = txt.replace('c++', 'cpp')
txt = txt.replace('. ', ' ') # e.g. 'vs.' -> 'vs'
txt = txt.replace(', ', ' ') # e.g. 'foo, bar' -> 'foo bar'
txt = txt.replace(' ', '_')
res = ''
for i,c in enumerate(txt):
if (ord(c) >= ord('a') and ord(c) <= ord('z')) or \
(ord(c) >= ord('A') and ord(c) <= ord('Z')) or \
(ord(c) >= ord('0') and ord(c) <= ord('9') and i > 0) or \
c in '_':
res += c
elif c in '()[]{}?\'"':
pass # eat
else:
res += '_'
return res
for elem in soup.select('*'):
if not elem.has_key('id'):
continue
e_id = elem['id']
if ids.has_key(e_id):
print('WARNING: duplicate id %s' % e_id)
ids[e_id] = True
# add automatic anchors for every other heading, with priority in
# naming for higher level sections (e.g. h2 over h3)
for hdr in hdr_tags:
for elem in soup.select(hdr):
if elem.has_key('id'):
continue # already has an id anchor
e_name = elem.text
a_name = findAutoName(e_name)
if ids.has_key(a_name):
print('WARNING: cannot generate automatic anchor name for %s (already exists)' % e_name)
continue
ids[a_name] = True
elem['id'] = a_name
def transformAddHeadingLinks(soup):
hdr_tags = [ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6' ]
changes = []
for elem in soup.select('*'):
if elem.name not in hdr_tags or not elem.has_key('id'):
continue
new_elem = soup.new_tag('a')
new_elem['href'] = '#' + elem['id']
new_elem['class'] = 'sectionlink'
new_elem.string = u'\u00a7' # section sign
# avoid mutation while iterating
changes.append((elem, new_elem))
for elem, new_elem in changes:
if elem.has_key('class'):
elem['class'] = elem['class'] + ' sectiontitle'
else:
elem['class'] = 'sectiontitle'
elem.append(' ')
elem.append(new_elem)
def setNavSelected(soup, pagename):
# pagename must match <li><a> content
for elem in soup.select('#site-top-nav li'):
if elem.text == pagename:
elem['class'] = 'selected'
# FIXME: refactor shared parts
def scanApiCalls(apitestdir):
re_api_call = re.compile(r'duk_[0-9a-zA-Z_]+')
res = {} # api call -> [ test1, ..., testN ]
tmpfiles = os.listdir(apitestdir)
for filename in tmpfiles:
if os.path.splitext(filename)[1] != '.c':
continue
f = open(os.path.join(apitestdir, filename))
data = f.read()
f.close()
apicalls = re_api_call.findall(data)
for i in apicalls:
if not res.has_key(i):
res[i] = []
if filename not in res[i]:
res[i].append(filename)
for k in res.keys():
res[k].sort()
return res
def createTagIndex(api_docs, used_tags):
res = []
res.append('<h1 id="bytag">API calls by tag</h1>')
for tag in used_tags:
res.append('<h2>' + htmlEscape(tag) + '</h2>')
res.append('<ul class="taglist">')
for doc in api_docs:
if not doc['parts'].has_key('tags'):
continue
for i in doc['parts']['tags']:
if i != tag:
continue
res.append('<li><a href="#%s">%s</a></li>' % (htmlEscape(doc['name']), htmlEscape(doc['name'])))
res.append('</ul>')
return res
def generateApiDoc(apidocdir, apitestdir):
templ_soup = validateAndParseHtml(readFile('template.html'))
setNavSelected(templ_soup, 'API')
# scan api files
tmpfiles = os.listdir(apidocdir)
apifiles = []
for filename in tmpfiles:
if os.path.splitext(filename)[1] == '.txt':
apifiles.append(filename)
apifiles.sort()
#print(apifiles)
print '%d api files' % len(apifiles)
# scan api testcases for references to API calls
testrefs = scanApiCalls(apitestdir)
#print(repr(testrefs))
# title
title_elem = templ_soup.select('#template-title')[0]
del title_elem['id']
title_elem.string = 'Duktape API'
# scan api doc files
used_tags = []
api_docs = [] # [ { 'parts': xxx, 'name': xxx } ]
for filename in apifiles:
parts = parseApiDoc(os.path.join(apidocdir, filename))
funcname = os.path.splitext(os.path.basename(filename))[0]
if parts.has_key('tags') and 'omit' in parts['tags']:
print 'Omit API doc: ' + str(funcname)
continue
if parts.has_key('tags'):
for i in parts['tags']:
if i not in used_tags:
used_tags.append(i)
api_docs.append({ 'parts': parts, 'name': funcname })
used_tags.sort()
# nav
res = []
navlinks = []
navlinks.append(['#introduction', 'Introduction'])
navlinks.append(['#notation', 'Notation'])
navlinks.append(['#concepts', 'Concepts'])
navlinks.append(['#defines', 'Header definitions'])
navlinks.append(['#bytag', 'API calls by tag'])
navlinks.append(['', u'\u00a0']) # XXX: force vertical space
for doc in api_docs:
funcname = doc['name']
navlinks.append(['#' + funcname, funcname])
res.append('<ul>')
for nav in navlinks:
res.append('<li><a href="' + htmlEscape(nav[0]) + '">' + htmlEscape(nav[1]) + '</a></li>')
res.append('</ul>')
nav_soup = validateAndParseHtml('\n'.join(res))
tmp_soup = templ_soup.select('#site-middle-nav')[0]
tmp_soup.clear()
for i in nav_soup.select('body')[0]:
tmp_soup.append(i)
# content
res = []
res += [ '<div class="main-title"><strong>Duktape API</strong></div>' ]
# FIXME: generate from the same list as nav links for these
res += processRawDoc('api/intro.html')
res += processRawDoc('api/notation.html')
res += processRawDoc('api/concepts.html')
res += processRawDoc('api/defines.html')
# tag index
res += createTagIndex(api_docs, used_tags)
# api docs
for doc in api_docs:
# FIXME: Here we'd like to validate individual processApiDoc() results so
# that they don't e.g. have unbalanced tags. Or at least normalize them so
# that they don't break the entire page.
        data = None
        try:
            data = processApiDoc(doc['parts'], doc['name'], testrefs, used_tags)
            res += data
        except:
            print repr(data)
            print 'FAIL: ' + repr(doc['name'])
            raise
print('used tags: ' + repr(used_tags))
content_soup = validateAndParseHtml('\n'.join(res))
tmp_soup = templ_soup.select('#site-middle-content')[0]
tmp_soup.clear()
for i in content_soup.select('body')[0]:
tmp_soup.append(i)
tmp_soup['class'] = 'content'
return templ_soup
def generateIndexPage():
templ_soup = validateAndParseHtml(readFile('template.html'))
index_soup = validateAndParseHtml(readFile('index/index.html'))
setNavSelected(templ_soup, 'Home')
title_elem = templ_soup.select('#template-title')[0]
del title_elem['id']
title_elem.string = 'Duktape'
tmp_soup = templ_soup.select('#site-middle')[0]
tmp_soup.clear()
for i in index_soup.select('body')[0]:
tmp_soup.append(i)
tmp_soup['class'] = 'content'
return templ_soup
def generateDownloadPage(releases_filename):
templ_soup = validateAndParseHtml(readFile('template.html'))
down_soup = validateAndParseHtml(readFile('download/download.html'))
setNavSelected(templ_soup, 'Download')
title_elem = templ_soup.select('#template-title')[0]
del title_elem['id']
title_elem.string = 'Downloads'
if fancy_releaselog:
# fancy releaselog
rel_data = rst2Html(os.path.abspath(os.path.join('..', 'RELEASES.txt')))
rel_soup = BeautifulSoup(rel_data)
released = rel_soup.select('#released')[0]
# massage the rst2html generated HTML to be more suitable
for elem in released.select('h1'):
elem.extract()
releaselog_elem = down_soup.select('#releaselog')[0]
releaselog_elem.insert_after(released)
else:
# plaintext releaselog
releaselog_elem = down_soup.select('#releaselog')[0]
pre_elem = down_soup.new_tag('pre')
releaselog_elem.append(pre_elem)
f = open(releases_filename, 'rb')
pre_elem.string = f.read().decode('utf-8')
f.close()
# automatic md5sums for downloadable files
# <tr><td class="reldate">2013-09-21</td>
# <td class="filename"><a href="duktape-0.6.0.tar.xz">duktape-0.6.0.tar.xz</a></td>
# <td class="description">alpha, first round of work on public API</td>
# <td class="hash">fa384a42a27d996313e0192c51c50b4a</td></tr>
for tr in down_soup.select('tr'):
tmp = tr.select('.filename')
if len(tmp) != 1:
continue
href = tmp[0].select('a')[0]['href']
hash_elem = tr.select('.hash')[0]
hash_elem.string = getFileMd5(os.path.abspath(os.path.join('binaries', href))) or '???'
tmp_soup = templ_soup.select('#site-middle')[0]
tmp_soup.clear()
for i in down_soup.select('body')[0]:
tmp_soup.append(i)
tmp_soup['class'] = 'content'
return templ_soup
def generateGuide():
templ_soup = validateAndParseHtml(readFile('template.html'))
setNavSelected(templ_soup, 'Guide')
title_elem = templ_soup.select('#template-title')[0]
del title_elem['id']
title_elem.string = 'Duktape Programmer\'s Guide'
# nav
res = []
navlinks = []
navlinks.append(['#introduction', 'Introduction'])
navlinks.append(['#gettingstarted', 'Getting started'])
navlinks.append(['#programming', 'Programming model'])
navlinks.append(['#types', 'Stack types'])
navlinks.append(['#typealgorithms', 'Type algorithms'])
navlinks.append(['#duktapebuiltins', 'Duktape built-ins'])
    navlinks.append(['#es6features', 'Ecmascript ES6 features'])
navlinks.append(['#custombehavior', 'Custom behavior'])
navlinks.append(['#customjson', 'Custom JSON formats'])
navlinks.append(['#errorobjects', 'Error objects'])
navlinks.append(['#functionobjects', 'Function objects'])
navlinks.append(['#finalization', 'Finalization'])
navlinks.append(['#coroutines', 'Coroutines'])
navlinks.append(['#propertyvirtualization', 'Property virtualization'])
navlinks.append(['#compiling', 'Compiling'])
navlinks.append(['#performance', 'Performance'])
navlinks.append(['#portability', 'Portability'])
navlinks.append(['#compatibility', 'Compatibility'])
navlinks.append(['#limitations', 'Limitations'])
navlinks.append(['#comparisontolua', 'Comparison to Lua'])
res.append('<ul>')
for nav in navlinks:
res.append('<li><a href="' + htmlEscape(nav[0]) + '">' + htmlEscape(nav[1]) + '</a></li>')
res.append('</ul>')
nav_soup = validateAndParseHtml('\n'.join(res))
tmp_soup = templ_soup.select('#site-middle-nav')[0]
tmp_soup.clear()
for i in nav_soup.select('body')[0]:
tmp_soup.append(i)
# content
res = []
res += [ '<div class="main-title"><strong>Duktape Programmer\'s Guide</strong></div>' ]
res += processRawDoc('guide/intro.html')
res += processRawDoc('guide/gettingstarted.html')
res += processRawDoc('guide/programming.html')
res += processRawDoc('guide/stacktypes.html')
res += processRawDoc('guide/typealgorithms.html')
res += processRawDoc('guide/duktapebuiltins.html')
res += processRawDoc('guide/es6features.html')
res += processRawDoc('guide/custombehavior.html')
res += processRawDoc('guide/customjson.html')
res += processRawDoc('guide/errorobjects.html')
res += processRawDoc('guide/functionobjects.html')
res += processRawDoc('guide/finalization.html')
res += processRawDoc('guide/coroutines.html')
res += processRawDoc('guide/propertyvirtualization.html')
res += processRawDoc('guide/compiling.html')
res += processRawDoc('guide/performance.html')
res += processRawDoc('guide/portability.html')
res += processRawDoc('guide/compatibility.html')
res += processRawDoc('guide/limitations.html')
res += processRawDoc('guide/luacomparison.html')
content_soup = validateAndParseHtml('\n'.join(res))
tmp_soup = templ_soup.select('#site-middle-content')[0]
tmp_soup.clear()
for i in content_soup.select('body')[0]:
tmp_soup.append(i)
tmp_soup['class'] = 'content'
return templ_soup
def generateStyleCss():
styles = [
'reset.css',
'style-html.css',
'style-content.css',
'style-top.css',
'style-middle.css',
'style-bottom.css',
'style-index.css',
'style-download.css',
'style-api.css',
'style-guide.css',
'highlight.css'
]
style = ''
for i in styles:
style += '/* === %s === */\n' % i
style += readFile(i)
return style
def postProcess(soup, includeDir, autoAnchors=False, headingLinks=False, duktapeVersion=None):
# read in source snippets from include files
if True:
transformReadIncludes(soup, includeDir)
# version number
if True:
transformVersionNumber(soup, duktapeVersion)
# current date
if True:
transformCurrentDate(soup)
# add <hr> elements before all <h1> elements to improve readability
# in text browsers
if True:
transformAddHrBeforeH1(soup)
# add automatic anchors to all headings (as long as they don't conflict
# with any manually assigned "long term" ids)
if autoAnchors:
transformAddAutoAnchorsNumbered(soup)
if headingLinks:
transformAddHeadingLinks(soup)
if colorize:
transformColorizeCode(soup, 'c-code', 'c')
transformColorizeCode(soup, 'ecmascript-code', 'javascript')
if fancy_stack:
transformFancyStacks(soup)
if remove_fixme:
transformRemoveClass(soup, 'fixme')
return soup
def writeFile(name, data):
f = open(name, 'wb')
f.write(data)
f.close()
def scrapeDuktapeVersion():
    f = open(os.path.join('..', 'src', 'duktape.h'))
    re_ver = re.compile(r'^#define DUK_VERSION\s+(\d+)L?\s*$')
    raw_ver = None
    str_ver = None
    for line in f:
        line = line.strip()
        m = re_ver.match(line)
        if m is None:
            continue
        raw_ver = int(m.group(1))
        str_ver = '%d.%d.%d' % (raw_ver / 10000, raw_ver / 100 % 100, raw_ver % 100)
    f.close()
    if raw_ver is None:
        raise Exception('cannot scrape Duktape version')
    return str_ver, raw_ver
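# Example (grounded in the arithmetic above): a duktape.h line
# '#define DUK_VERSION  10500L' scrapes to ('1.5.0', 10500).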
def main():
outdir = sys.argv[1]; assert(outdir)
apidocdir = 'api'
apitestdir = '../api-testcases'
guideincdir = '../examples/guide'
apiincdir = '../examples/api'
out_charset = 'utf-8'
releases_filename = '../RELEASES.txt'
duk_verstr, duk_verint = scrapeDuktapeVersion()
print 'Scraped version number: ' + duk_verstr
print 'Generating style.css'
data = generateStyleCss()
writeFile(os.path.join(outdir, 'style.css'), data)
#writeFile(os.path.join(outdir, 'reset.css'), readFile('reset.css'))
#writeFile(os.path.join(outdir, 'highlight.css'), readFile('highlight.css'))
print 'Generating api.html'
soup = generateApiDoc(apidocdir, apitestdir)
soup = postProcess(soup, apiincdir, autoAnchors=True, headingLinks=True, duktapeVersion=duk_verstr)
writeFile(os.path.join(outdir, 'api.html'), soup.encode(out_charset))
print 'Generating guide.html'
soup = generateGuide()
soup = postProcess(soup, guideincdir, autoAnchors=True, headingLinks=True, duktapeVersion=duk_verstr)
writeFile(os.path.join(outdir, 'guide.html'), soup.encode(out_charset))
print 'Generating index.html'
soup = generateIndexPage()
soup = postProcess(soup, None, duktapeVersion=duk_verstr)
writeFile(os.path.join(outdir, 'index.html'), soup.encode(out_charset))
print 'Generating download.html'
soup = generateDownloadPage(releases_filename)
soup = postProcess(soup, None, duktapeVersion=duk_verstr)
writeFile(os.path.join(outdir, 'download.html'), soup.encode(out_charset))
print 'Copying misc files'
for i in [ 'favicon.ico',
'startup_image_320x480.png',
'touch_icon_114x114.png',
'touch_icon_120x120.png',
'touch_icon_144x144.png',
'touch_icon_152x152.png',
'touch_icon_57x57.png',
'touch_icon_60x60.png',
'touch_icon_72x72.png' ]:
shutil.copyfile(os.path.join('./', i), os.path.join(outdir, i))
print 'Copying binaries'
for i in os.listdir('binaries'):
shutil.copyfile(os.path.join('binaries', i), os.path.join(outdir, i))
print 'Copying dukweb.js files'
for i in [ '../dukweb.js',
'../jquery-1.11.0.js',
'../dukweb/dukweb.css',
'../dukweb/dukweb.html' ]:
shutil.copyfile(os.path.join('./', i), os.path.join(outdir, os.path.basename(i)))
if __name__ == '__main__':
main()
|
andoma/duktape
|
website/buildsite.py
|
Python
|
mit
| 27,306
|
import time
import sys
def log_called_times_decorator(func):
def wrapper(*args):
wrapper.count += 1
# print "The function I modify has been called {0} times(s).".format(wrapper.count)
now = time.time()
if now - wrapper.last_log > wrapper.dt:
print '[DEBUG] In last %ds %s() was called %d times' % (wrapper.dt,func.__name__,wrapper.count)
wrapper.count = 0
wrapper.last_log = now
return func(*args)
wrapper.count = 0
wrapper.last_log = time.time()
wrapper.dt = 5
return wrapper
def print_progress(percent=None, x=0, max=100):
    if percent is None:
        percent = x * 100.0 / max
sys.stdout.write('\r')
bars = int(percent / 5)
sys.stdout.write("[%-20s] %d%% " % ('='*bars, int(percent)))
sys.stdout.flush()
if __name__ == '__main__':
'''
@log_called_times_decorator
def ff():
print 'f'
while True:
ff()
time.sleep(1)
'''
print_progress(45)
print ''
print_progress(x=20,max=200)
|
michaellas/streaming-vid-to-gifs
|
src/utils.py
|
Python
|
mit
| 1,054
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from gitrepo.tests.unit.cli import clibase
PROJECTS_YAML = '''
- project: project_1
src-repo: https://src/path/to/repo_1
dst-repo: ssh://dst@127.0.0.1:9999/path/to/repo_1
branches:
- "*"
- project: project_2
src-repo: https://src/path/to/repo_2
dst-repo: ssh://dst@127.0.0.1:9999/path/to/repo_2
branches:
- master
'''
class TestSyncCommand(clibase.BaseCLITest):
@mock.patch('gitrepo.utils.file_exists', return_value=True)
def test_sync(self, _):
expected_path = '/tmp/fake_projects.yaml'
args = 'sync {file_path} -p project_1'.format(file_path=expected_path)
m_open = mock.mock_open(read_data=PROJECTS_YAML)
with mock.patch('gitrepo.utils.open', m_open, create=True):
self.exec_command(args)
m_open.assert_called_once_with(expected_path)
self.m_get_client.assert_called_once_with('sync', mock.ANY)
@mock.patch('sys.stderr')
def test_sync_fail(self, mocked_stderr):
args = 'sync'
self.assertRaises(SystemExit, self.exec_command, args)
self.assertIn('error',
mocked_stderr.write.call_args_list[-1][0][0])
|
tivaliy/git-repo
|
gitrepo/tests/unit/cli/test_sync.py
|
Python
|
apache-2.0
| 1,800
|
# Lint as: python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper module for retrieving git repository metadata."""
import datetime as dt
import functools
import pathlib
from typing import Optional
from . import subprocess_utils
@functools.lru_cache(maxsize=1)
def get_chromium_src_path() -> pathlib.Path:
"""Returns the root 'src' absolute path of this Chromium Git checkout.
Example Path: /home/username/git/chromium/src
Returns:
The absolute path to the 'src' root directory of the Chromium Git
checkout containing this file.
"""
_CHROMIUM_SRC_ROOT = pathlib.Path(__file__).parents[3].resolve(strict=True)
if _CHROMIUM_SRC_ROOT.name != 'src':
raise AssertionError(
f'_CHROMIUM_SRC_ROOT "{_CHROMIUM_SRC_ROOT}" should end in "src".')
try:
_assert_git_repository(str(_CHROMIUM_SRC_ROOT))
    except (ValueError, RuntimeError) as err:
        raise AssertionError(
            f'"{_CHROMIUM_SRC_ROOT}" is not a valid Git repository root.') from err
return _CHROMIUM_SRC_ROOT
def get_head_commit_hash(git_repo: Optional[str] = None) -> str:
"""Gets the hash of the commit at HEAD for a Git repository.
This returns the full, non-abbreviated, SHA1 hash of the commit as a string
containing 40 hexadecimal characters. For example,
'632918ad686949a9bc5f17ee1b48fa48e81be645'.
Args:
git_repo:
The path to a Git repository's root directory; if not specified,
defaults to the Chromium Git repository.
Returns:
The SHA1 hash of the Git repository's commit at HEAD.
    Raises:
ValueError:
The path specified in the git_repo parameter is not a root
directory for a Git repository.
RuntimeError:
The path specified in the git_repo parameter contains an infinite
loop.
"""
if not git_repo:
git_repo = str(get_chromium_src_path())
_assert_git_repository(git_repo)
return subprocess_utils.run_command(
        ['git', 'show', '--no-patch', '--pretty=format:%H'], cwd=git_repo)
def get_head_commit_datetime(git_repo: Optional[str] = None) -> dt.datetime:
"""Gets the datetime of the commit at HEAD for a Git repository in UTC.
The datetime returned contains timezone information (in timezone.utc) so
that it can be easily be formatted or converted (e.g., to local time) based
on the caller's needs.
Args:
git_repo:
The path to a Git repository's root directory; if not specified,
defaults to the Chromium Git repository.
Returns:
The datetime of the Git repository's commit at HEAD.
    Raises:
ValueError:
The path specified in the git_repo parameter is not a root
directory for a Git repository.
RuntimeError:
The path specified in the git_repo parameter contains an infinite
loop.
"""
if not git_repo:
git_repo = str(get_chromium_src_path())
_assert_git_repository(git_repo)
timestamp = subprocess_utils.run_command(
['git', 'show', '--no-patch', '--format=%ct'], cwd=git_repo)
return dt.datetime.fromtimestamp(float(timestamp), tz=dt.timezone.utc)
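# Usage sketch (paths are illustrative):
#
#     sha = get_head_commit_hash()                      # Chromium src checkout
#     when = get_head_commit_datetime('/path/to/repo')  # any Git repo root
#     print(sha, when.isoformat())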
def _assert_git_repository(git_repo_root: str) -> None:
try:
repo_path = pathlib.Path(git_repo_root).resolve(strict=True)
except FileNotFoundError as err:
raise ValueError(
f'The Git repository root "{git_repo_root}" is invalid;'
f' {err.strerror}: "{err.filename}".')
if not repo_path.is_dir():
raise ValueError(
f'The Git repository root "{git_repo_root}" is invalid;'
f' not a directory.')
try:
git_internals_path = repo_path.joinpath('.git').resolve(strict=True)
except FileNotFoundError as err:
raise ValueError(
f'The path "{git_repo_root}" is not a root directory for a Git'
f' repository; {err.strerror}: "{err.filename}".')
    if not git_internals_path.is_dir():
raise ValueError(
f'The Git repository root "{git_repo_root}" is invalid;'
f' {git_internals_path} is not a directory.')
|
nwjs/chromium.src
|
tools/android/python_utils/git_metadata_utils.py
|
Python
|
bsd-3-clause
| 4,240
|
from flask import Flask
from flask.ext.restful import Api, Resource
from peewee import SqliteDatabase
database = SqliteDatabase('/tmp/fangorn.db', threadlocals=True)
from models import User, Token, Folder, File
database.create_tables([User, Token, Folder, File], True)
app = Flask(__name__)
api = Api(app)
from users import RegistrationResource, AuthenticationResource, AuthenticatedResource
from folders import FolderResource, FolderInstance
api.add_resource(RegistrationResource, '/users/')
api.add_resource(AuthenticationResource, '/authenticate/')
api.add_resource(FolderResource, '/folders/')
api.add_resource(FolderInstance, '/folders/<int:id>/')
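# Usage sketch (hedged; assumes this package is importable as 'api'):
#
#     from api import app
#     app.run(debug=True)
#     # then e.g.: curl -X POST http://127.0.0.1:5000/users/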
|
citruspi/Fangorn
|
api/__init__.py
|
Python
|
unlicense
| 659
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms_pages', '0017_auto_20160417_1450'),
]
operations = [
migrations.AlterField(
model_name='metadata',
name='office',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Office', blank=True),
),
migrations.AlterField(
model_name='metadata',
name='region',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Region', blank=True),
),
migrations.AlterField(
model_name='personmeta',
name='year',
field=models.IntegerField(null=True, choices=[(2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015)], blank=True, verbose_name='Рік декларації'),
),
]
|
dchaplinsky/declarations.com.ua
|
declarations_site/cms_pages/migrations/0018_auto_20170211_1550.py
|
Python
|
mit
| 1,036
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing prediction creation
"""
from __future__ import absolute_import
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module,
teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
def setup_module():
"""Setup for the module
"""
common_setup_module()
test = TestPrediction()
test.setup_scenario02()
test.setup_scenario06()
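    # Note (an inference, not an original comment): scenario02 and
    # scenario06 are built up front because later tests reproduce their
    # outputs via i_have_previous_scenario_or_reproduce_it, so running
    # them once here avoids repeated source/dataset/model creation.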
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestPrediction(object):
def setup(self):
"""
Debug information
"""
print "\n-------------------\nTests in: %s\n" % __name__
def teardown(self):
"""Calling generic teardown for every method
"""
self.world = teardown_class()
print "\nEnd of tests in: %s\n-------------------\n" % __name__
def test_scenario01(self):
"""
Scenario: Successfully building test predictions from start with no headers:
Given I create BigML resources uploading train "<data>" file with no headers to test "<test>" with no headers and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/iris_nh.csv | ../data/test_iris_nh.csv | ./scenario1_nh/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario01.__doc__
examples = [
['data/iris_nh.csv', 'data/test_iris_nh.csv', 'scenario1_nh/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_with_no_headers(self, example[0], example[1], example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def setup_scenario02(self):
"""
Scenario: Successfully building test predictions from start:
Given I create BigML resources uploading train "<data>" file to test "<test>" and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/grades.csv | ../data/test_grades.csv | ./scenario1_r/predictions.csv | ./check_files/predictions_grades.csv |
| ../data/iris.csv | ../data/test_iris.csv | ./scenario1/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.setup_scenario02.__doc__
examples = [
['data/grades.csv', 'data/test_grades.csv', 'scenario1_r/predictions.csv', 'check_files/predictions_grades.csv'],
['data/iris.csv', 'data/test_iris.csv', 'scenario1/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources(self, example[0], example[1], example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario03(self):
"""
Scenario: Successfully building test predictions from source
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using source to test "<test>" and log predictions in "<output>"
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario2/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario03.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario2/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_source(self, None, test=example[2], output=example[3])
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario04(self):
"""
Scenario: Successfully building test predictions from dataset
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using dataset to test "<test>" and log predictions in "<output>"
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario3/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario04.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario3/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_dataset(self, None, test=example[2], output=example[3])
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario05(self):
"""
Scenario: Successfully building test predictions from model
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using model to test "<test>" and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario4/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario05.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario4/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_model(self, test=example[2], output=example[3])
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def setup_scenario06(self):
"""
Scenario: Successfully building test predictions from ensemble
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using ensemble of <number_of_models> models to test "<test>" and log predictions in "<output>"
And I check that the ensemble has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | number_of_models | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | 10 | ../data/test_iris.csv | ./scenario5/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.setup_scenario06.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', '10', 'data/test_iris.csv', 'scenario5/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_ensemble(self, number_of_models=example[2], test=example[3], output=example[4])
test_pred.i_check_create_ensemble(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[5])
def test_scenario07(self):
"""
Scenario: Successfully building test predictions from models file
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I have previously executed "<scenario2>" or reproduce it with arguments <kwargs2>
And I create BigML resources using models in file "<models_file>" to test "<test>" and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs |scenario2 | kwargs2 | models_file | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | scenario5| {"number_of_models": 10, "test": "../data/test_iris.csv", "output": "./scenario5/predictions.csv"} | ./scenario5/models | ../data/test_iris.csv | ./scenario6/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario07.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}',
'scenario5', '{"number_of_models": 10, "test": "data/test_iris.csv", "output": "scenario5/predictions.csv"}',
'scenario5/models', 'data/test_iris.csv', 'scenario6/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[2], example[3])
test_pred.i_create_resources_from_models_file(self, multi_label=None, models_file=example[4], test=example[5], output=example[6])
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[7])
def test_scenario08(self):
"""
Scenario: Successfully building test predictions from dataset file
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using dataset in file "<dataset_file>" to test "<test>" and log predictions in "<output>"
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | dataset_file | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ./scenario1/dataset | ../data/test_iris.csv | ./scenario7/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario08.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', 'scenario1/dataset', 'data/test_iris.csv', 'scenario7/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_dataset_file(self, dataset_file=example[2], test=example[3], output=example[4])
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[5])
def test_scenario09(self):
"""
Scenario: Successfully combining test predictions from existing directories
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I have previously executed "<scenario2>" or reproduce it with arguments <kwargs2>
Given I combine BigML predictions files in "<directory1>" and "<directory2>" into "<output>"
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs |scenario2 | kwargs2 | directory1 | directory2 | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | scenario5| {"number_of_models": 10, "output": "./scenario5/predictions.csv", "test": "../data/test_iris.csv"} | ./scenario1 | ./scenario5 | ./scenario8/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario09.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}',
'scenario5', '{"number_of_models": 10, "test": "data/test_iris.csv", "output": "scenario5/predictions.csv"}',
'scenario1', 'scenario5', 'scenario8/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[2], example[3])
test_pred.i_find_predictions_files(self, directory1=example[4], directory2=example[5], output=example[6])
test_pred.i_check_predictions(self, example[7])
def test_scenario10(self):
"""
Scenario: Successfully combining test predictions from existing directories
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I have previously executed "<scenario2>" or reproduce it with arguments <kwargs2>
And I combine BigML predictions files in "<directory1>" and "<directory2>" into "<output>" with method "<method>"
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs |scenario2 | kwargs2 | directory1 | directory2 | output |predictions_file | method |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | scenario5| {"number_of_models": 10, "output": "./scenario5/predictions.csv", "test": "../data/test_iris.csv"} | ./scenario1 | ./scenario5 | ./scenario9/predictions_c.csv | ./check_files/predictions_iris.csv | "confidence weighted" |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | scenario5| {"number_of_models": 10, "output": "./scenario5/predictions.csv", "test": "../data/test_iris.csv"} | ./scenario1 | ./scenario5 | ./scenario9/predictions_p.csv | ./check_files/predictions_iris_p.csv | "probability weighted" |
| scenario1_r| {"data": "../data/grades.csv", "output": "./scenario1r/predictions.csv", "test": "../data/test_grades.csv"} | scenario1_r| {"data": "../data/grades.csv", "output": "./scenario1_r/predictions.csv", "test": "../data/test_grades.csv"} | ./scenario1_r | ./scenario1_r | ./scenario10/predictions_c.csv | ./check_files/predictions_grades.csv | "confidence weighted" |
| scenario1_r| {"data": "../data/grades.csv", "output": "./scenario1r/predictions.csv", "test": "../data/test_grades.csv"} | scenario1_r| {"data": "../data/grades.csv", "output": "./scenario1_r/predictions.csv", "test": "../data/test_grades.csv"} | ./scenario1_r | ./scenario1_r | ./scenario10/predictions_p.csv | ./check_files/predictions_grades_p.csv | "probability weighted" |
"""
print self.test_scenario10.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}',
'scenario5', '{"number_of_models": 10, "output": "scenario5/predictions.csv", "test": "data/test_iris.csv"}',
'scenario1', 'scenario5', 'scenario9/predictions_c.csv', 'check_files/predictions_iris.csv', '"confidence weighted"'],
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}',
'scenario5', '{"number_of_models": 10, "output": "scenario5/predictions.csv", "test": "data/test_iris.csv"}',
'scenario1', 'scenario5', 'scenario9/predictions_p.csv', 'check_files/predictions_iris_p.csv', '"probability weighted"'],
['scenario1_r', '{"data": "data/grades.csv", "output": "scenario1_r/predictions.csv", "test": "data/test_grades.csv"}',
'scenario1_r', '{"data": "data/grades.csv", "output": "scenario1_r/predictions.csv", "test": "data/test_grades.csv"}',
'scenario1_r', 'scenario1_r', 'scenario10/predictions_c.csv', 'check_files/predictions_grades.csv', '"confidence weighted"'],
['scenario1_r', '{"data": "data/grades.csv", "output": "scenario1_r/predictions.csv", "test": "data/test_grades.csv"}',
'scenario1_r', '{"data": "data/grades.csv", "output": "scenario1_r/predictions.csv", "test": "data/test_grades.csv"}',
'scenario1_r', 'scenario1_r', 'scenario10/predictions_p.csv', 'check_files/predictions_grades_p.csv', '"probability weighted"']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[2], example[3])
test_pred.i_find_predictions_files_with_method(self, directory1=example[4], directory2=example[5], output=example[6], method=example[8])
test_pred.i_check_predictions(self, example[7])
def test_scenario11(self):
"""
Scenario: Successfully building test predictions from dataset specifying objective field and model fields
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using dataset, objective field <objective> and model fields <fields> to test "<test>" and log predictions in "<output>"
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file | objective | fields |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario11/predictions.csv | ./check_files/predictions_iris_b.csv | 0 | "petal length","petal width" |
"""
print self.test_scenario11.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario11/predictions.csv', 'check_files/predictions_iris_b.csv', '0', '"petal length","petal width"']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_dataset_objective_model(self, objective=example[5], fields=example[6], test=example[2], output=example[3])
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario12(self):
"""
Scenario: Successfully building cross-validation from dataset
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create a BigML cross-validation with rate <rate> using the dataset in file "<dataset_file>" and log results in "<output>"
And I check that the models have been created
And I check that the evaluations have been created
Then the cross-validation json model info is like the one in "<cv_file>"
Examples:
|scenario | kwargs | rate | dataset_file | output |cv_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | 0.05 | ./scenario1/dataset | ./scenario12/cross-validation | ./check_files/cross_validation.json |
"""
print self.test_scenario12.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', '0.05', 'scenario1/dataset', 'scenario12/cross-validation', 'check_files/cross_validation.json']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_cross_validation_from_dataset(self, rate=example[2], dataset_file=example[3], output=example[4])
test_pred.i_check_create_models(self)
test_pred.i_check_create_evaluations(self, number_of_evaluations=None)
test_pred.i_check_cross_validation(self, example[5])
def test_scenario13(self):
"""
Scenario: Successfully building a source with a given locale and storing its result
Given I create a BigML source from file "<data>" with locale "<locale>", field attributes "<field_attributes>" and types file "<types>" storing results in "<output>"
Then I check that the stored source file exists
And the locale of the source is "<bigml_locale>"
And the type of field "<field_id>" is "<type>"
And the label of field "<field_id>" is "<label>"
Examples:
|data | locale | field_attributes | types | output | bigml_locale | field_id | type | label
| ../data/iris.csv| es_ES.UTF-8 |../data/field_attributes.txt |../data/types.txt |./scenario13/store_file | es_ES | 000004 | text | species label
"""
print self.test_scenario13.__doc__
examples = [
['data/iris.csv', 'es_ES.UTF-8', 'data/field_attributes.txt', 'data/types.txt', 'scenario13/store_file', 'es_ES', '000004', 'text', 'species label']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_source_with_locale(self, data=example[0], locale=example[1], field_attributes=example[2], types=example[3], output=example[4])
test_pred.i_check_stored_source(self)
test_pred.i_check_source_locale(self, example[5])
test_pred.i_check_source_type(self, example[6], example[7])
test_pred.i_check_source_label(self, example[6], example[8])
def test_scenario14(self):
"""
Scenario: Successfully building test predictions from start with user-given separator:
Given I create BigML resources uploading train "<data>" file to test "<test>" and log predictions in "<output>" with "<separator>" as test field separator
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | separator | output |predictions_file |
| ../data/iris.csv | ../data/test_iris.tsv | "\t" |./scenario14/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario14.__doc__
examples = [
['data/iris.csv', 'data/test_iris.tsv', '"\t"', 'scenario14/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_with_separator(self, data=example[0], test=example[1], output=example[3], separator=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario15(self):
"""
Scenario: Successfully building test predictions from start with different prediction file format:
Given I create BigML resources uploading train "<data>" file to test "<test>" and log predictions in "<output>" with prediction options "<options>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |options |predictions_file |
| ../data/iris.csv | ../data/test_iris.csv |./scenario15/predictions.csv |--prediction-header --prediction-fields 'petal length,petal width' --prediction-info full | ./check_files/predictions_iris_h.csv |
"""
print self.test_scenario15.__doc__
examples = [
['data/iris.csv', 'data/test_iris.csv', 'scenario15/predictions.csv', '--prediction-header --prediction-fields \'petal length,petal width\' --prediction-info full', 'check_files/predictions_iris_h.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_with_options(self, data=example[0], test=example[1], output=example[2], options=example[3])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario16(self):
"""
Scenario: Successfully building threshold test predictions from ensemble
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using ensemble of <number_of_models> models with replacement to test "<test>" and log predictions in "<output>"
And I check that the ensemble has been created
And I check that the predictions are ready
And I create BigML resources using the previous ensemble with different thresholds to test "<test>" and log predictions in "<output2>" and "<output3>"
Then local predictions for different thresholds in "<output2>" and "<output3>" are different
Examples:
|scenario | kwargs | number_of_models | test | output | output2 | output3
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | 10 | ../data/test_iris.csv | ./scenario16/predictions.csv | ./scenario16/predictions2.csv | ./scenario16/predictions3.csv
"""
print self.test_scenario16.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', '10', 'data/test_iris.csv', 'scenario16/predictions.csv', 'scenario16/predictions2.csv', 'scenario16/predictions3.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_ensemble_with_replacement(self, number_of_models=example[2], test=example[3], output=example[4])
test_pred.i_check_create_ensemble(self)
test_pred.i_check_create_predictions(self)
test_pred.i_create_resources_from_ensemble_with_threshold(self, test=example[3], output2=example[5], output3=example[6])
test_pred.i_check_predictions_with_different_thresholds(self, example[5], example[6])
def test_scenario17(self):
"""
Scenario: Successfully building test predictions from local model
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using local model in "<scenario>" to test "<test>" and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | ../data/test_iris.csv | ./scenario17/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario17.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}', 'data/test_iris.csv', 'scenario17/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_create_resources_from_local_model(self, directory=example[0], test=example[2], output=example[3])
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario18(self):
"""
Scenario: Successfully building test predictions from ensemble
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
Given I have previously executed "<scenario2>" or reproduce it with arguments <kwargs2>
And I create BigML resources using local ensemble of <number_of_models> models in "<scenario2>" to test "<test>" and log predictions in "<output>"
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|scenario | kwargs |scenario2 | kwargs2 | number_of_models | test | output |predictions_file |
| scenario1| {"data": "../data/iris.csv", "output": "./scenario1/predictions.csv", "test": "../data/test_iris.csv"} | scenario5| {"number_of_models": 10, "output": "./scenario5/predictions.csv", "test": "../data/test_iris.csv"} | 10 | ../data/test_iris.csv | ./scenario18/predictions.csv | ./check_files/predictions_iris.csv |
"""
print self.test_scenario18.__doc__
examples = [
['scenario1', '{"data": "data/iris.csv", "output": "scenario1/predictions.csv", "test": "data/test_iris.csv"}',
'scenario5', '{"number_of_models": 10, "output": "scenario5/predictions.csv", "test": "data/test_iris.csv"}',
'10', 'scenario5', 'data/test_iris.csv', 'scenario18/predictions.csv', 'check_files/predictions_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[2], example[3])
test_pred.i_create_resources_from_local_ensemble(self, number_of_models=example[4], directory=example[5], test=example[6], output=example[7])
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[8])
def test_scenario19(self):
"""
Scenario: Successfully building test predictions from start using median:
Given I create BigML resources uploading train "<data>" file using the median to test "<test>" and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/grades.csv | ../data/test_grades.csv | ./scenario19/predictions.csv | ./check_files/predictions_grades_median.csv |
"""
print self.test_scenario19.__doc__
examples = [
['data/grades.csv', 'data/test_grades.csv', 'scenario19/predictions.csv', 'check_files/predictions_grades_median.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_with_median(self, data=example[0], test=example[1], output=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario20(self):
"""
Scenario: Successfully building test predictions using median from ensemble
Given I create BigML resources from "<data>" using ensemble of <number_of_models> models to test "<test>" using median and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the ensemble has been created
And I check that the models in the ensembles have been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
|data | number_of_models | test | output | predictions_file |
| ../data/grades.csv| 5 | ../data/test_grades.csv | ./scenario20/predictions.csv | ./check_files/predictions_grades_median_e.csv |
"""
print self.test_scenario20.__doc__
examples = [
['data/grades.csv', '5', 'data/test_grades.csv', 'scenario20/predictions.csv', 'check_files/predictions_grades_median_e.csv']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_resources_from_ensemble_using_median(self, data=example[0], number_of_models=example[1], test=example[2], output=example[3])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_ensemble(self)
test_pred.i_check_create_models_in_ensembles(self, in_ensemble=True)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[4])
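# A hedged note on running this module (an assumption, not from the
# original file): the setup/teardown naming follows nose conventions, so
# the suite would typically be invoked with something like:
#
#     nosetests bigmler/tests/test_01_predictions.py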
|
brokendata/bigmler
|
bigmler/tests/test_01_predictions.py
|
Python
|
apache-2.0
| 40,518
|
# MIT licensed
# Copyright (c) 2020 Chih-Hsuan Yen <yan12125 at gmail dot com>
import pytest
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
async def test_container(get_version):
assert await get_version("hello-world", {
"source": "container",
"container": "library/hello-world",
"include_regex": "linux",
}) == "linux"
async def test_container_paging(get_version):
assert await get_version("prometheus-operator", {
"source": "container",
"registry": "quay.io",
"container": "redhattraining/hello-world-nginx",
}) == "v1.0"
|
lilydjwg/nvchecker
|
tests/test_container.py
|
Python
|
mit
| 570
|
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import division, print_function, unicode_literals
from lxml import etree
from datetime import datetime, date, time
from decimal import Decimal
import locale
import unicodedata
import re
import pytz
from time import strftime
NAMESPACE_NFE = 'http://www.portalfiscal.inf.br/nfe'
NAMESPACE_CTE = 'http://www.portalfiscal.inf.br/cte'
NAMESPACE_SIG = 'http://www.w3.org/2000/09/xmldsig#'
NAMESPACE_NFSE = 'http://localhost:8080/WsNFe2/lote'
ABERTURA = '<?xml version="1.0" encoding="utf-8"?>'
locale.setlocale(locale.LC_ALL, b'pt_BR.UTF-8')
locale.setlocale(locale.LC_COLLATE, b'pt_BR.UTF-8')
class NohXML(object):
def __init__(self, *args, **kwargs):
self._xml = None
self.alertas = []
def _le_xml(self, arquivo):
if arquivo is None:
return False
if not isinstance(arquivo, basestring):
arquivo = etree.tounicode(arquivo)
if arquivo is not None:
if isinstance(arquivo, basestring):
if isinstance(arquivo, str):
arquivo = unicode(arquivo.encode('utf-8'))
if '<' in arquivo:
self._xml = etree.fromstring(tira_abertura(arquivo).encode('utf-8'))
else:
arq = open(arquivo)
txt = b''.join(arq.readlines())
txt = unicode(txt.decode('utf-8'))
txt = tira_abertura(txt)
arq.close()
self._xml = etree.fromstring(txt)
else:
self._xml = etree.parse(arquivo)
return True
return False
def _preenche_namespace(self, tag, sigla_ns):
if sigla_ns != '':
sigla_sig = sigla_ns + ':sig'
sigla_ns = '/' + sigla_ns + ':'
tag = sigla_ns.join(tag.split('/')).replace(sigla_ns + sigla_ns, '/' + sigla_ns).replace(sigla_sig, 'sig')
return tag
def _le_nohs(self, tag, ns=None, sigla_ns='nfe'):
        #
        # Try to read the tag without the namespaces first.
        # This is needed to correctly read re-rooted group tags.
        #
try:
nohs = self._xml.xpath(tag)
if len(nohs) >= 1:
return nohs
        except Exception:
pass
        #
        # That did not work, so the namespaces really have to be added
        #
namespaces = {'nfe': NAMESPACE_NFE, 'sig': NAMESPACE_SIG, 'nfse': NAMESPACE_NFSE, 'cte': NAMESPACE_CTE}
if ns is not None:
namespaces['res'] = ns
if '//NFe' in tag or ns == NAMESPACE_NFE:
sigla_ns = 'nfe'
elif '//CTe' in tag or ns == NAMESPACE_CTE:
sigla_ns = 'cte'
if not tag.startswith('//*/res'):
tag = self._preenche_namespace(tag, sigla_ns)
nohs = self._xml.xpath(tag, namespaces=namespaces)
if len(nohs) >= 1:
return nohs
else:
return None
def _le_noh(self, tag, ns=None, ocorrencia=1):
nohs = self._le_nohs(tag, ns)
if (nohs is not None) and (len(nohs) >= ocorrencia):
return nohs[ocorrencia-1]
else:
return None
def _le_tag(self, tag, propriedade=None, ns=None, ocorrencia=1):
noh = self._le_noh(tag, ns, ocorrencia)
if noh is None:
valor = ''
else:
if propriedade is None:
valor = noh.text
elif (noh.attrib is not None) and (len(noh.attrib) > 0):
valor = noh.attrib[propriedade]
else:
valor = ''
return valor
class ErroObrigatorio(Exception):
def __init__(self, codigo, nome, propriedade):
if propriedade:
self.value = 'No campo código ' + codigo + ', "' + nome + '", a propriedade "' + propriedade + '" é de envio obrigatório, mas não foi preenchida.'
else:
self.value = 'O campo código ' + codigo + ', "' + nome + '" é de envio obrigatório, mas não foi preenchido.'
def __str__(self):
return repr(self.value)
def __unicode__(self):
return unicode(self.value)
class TamanhoInvalido(Exception):
def __init__(self, codigo, nome, valor, tam_min=None, tam_max=None, dec_min=None, dec_max=None):
if tam_min:
self.value = 'O campo código ' + codigo + ', "' + nome + '", deve ter o tamanho mínimo de ' + unicode(tam_min) + ', mas o tamanho enviado foi ' + unicode(len(unicode(valor))) + ': ' + unicode(valor)
elif tam_max:
self.value = 'O campo código ' + codigo + ', "' + nome + '", deve ter o tamanho máximo de ' + unicode(tam_max) + ', mas o tamanho enviado foi ' + unicode(len(unicode(valor))) + ': ' + unicode(valor)
elif dec_min:
self.value = 'O campo código ' + codigo + ', "' + nome + '", deve ter o mínimo de ' + unicode(dec_min) + ' casas decimais, mas o enviado foi ' + unicode(len(unicode(valor))) + ': ' + unicode(valor)
elif dec_max:
self.value = 'O campo código ' + codigo + ', "' + nome + '", deve ter o máximo de ' + unicode(dec_max) + ' casas decimais, mas o enviado foi ' + unicode(len(unicode(valor))) + ': ' + unicode(valor)
def __str__(self):
return repr(self.value)
def __unicode__(self):
return unicode(self.value)
class ErroCaracterInvalido(Exception):
def __init__(self, codigo, nome, propriedade, valor, caracter):
if propriedade:
self.value = 'No campo código ' + codigo + ', "' + nome + '", a propriedade "' + propriedade + '" possui um caracter inválido: "' + caracter + '".'
else:
self.value = 'O campo código ' + codigo + ', "' + nome + '" possui um caracter inválido: "' + caracter + '".'
def __str__(self):
return repr(self.value)
def __unicode__(self):
return unicode(self.value)
class TagCaracter(NohXML):
def __init__(self, *args, **kwargs):
super(TagCaracter, self).__init__(*args, **kwargs)
self.codigo = ''
self.nome = ''
self._valor_string = ''
self.obrigatorio = True
self.tamanho = [None, None, None]
self.propriedade = None
self.namespace = None
self.namespace_obrigatorio = True
self.alertas = []
self.raiz = None
        # Code to make entity instance creation dynamic,
        # applying the attribute values at instantiation time
for k, v in kwargs.items():
setattr(self, k, v)
        if 'valor' in kwargs:
self.valor = kwargs['valor']
def _testa_obrigatorio(self, valor):
if self.obrigatorio and (not valor):
return ErroObrigatorio(self.codigo, self.nome, self.propriedade)
def _testa_tamanho_minimo(self, valor):
if self.tamanho[0] and (len(unicode(valor)) < self.tamanho[0]):
return TamanhoInvalido(self.codigo, self.nome, valor, tam_min=self.tamanho[0])
def _testa_tamanho_maximo(self, valor):
if self.tamanho[1] and (len(unicode(valor)) > self.tamanho[1]):
return TamanhoInvalido(self.codigo, self.nome, valor, tam_max=self.tamanho[1])
def _valida(self, valor):
self.alertas = []
if self._testa_obrigatorio(valor):
self.alertas.append(self._testa_obrigatorio(valor))
if self._testa_tamanho_minimo(valor):
self.alertas.append(self._testa_tamanho_minimo(valor))
if self._testa_tamanho_maximo(valor):
self.alertas.append(self._testa_tamanho_maximo(valor))
return self.alertas == []
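    # A usage sketch (an assumption, not an original comment): _valida
    # collects problems in self.alertas instead of raising, e.g.:
    #
    #     tag = TagCaracter(nome='xNome', tamanho=[2, 60])
    #     tag.valor = ''   # leaves an ErroObrigatorio and a
    #                      # TamanhoInvalido in tag.alertas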
def set_valor(self, novo_valor):
if novo_valor is not None:
            #
            # Remove invalid characters
            #
for c in novo_valor:
if c > 'ÿ':
raise ErroCaracterInvalido(self.codigo, self.nome, self.propriedade, novo_valor, c)
            #
            # Leading and trailing spaces must always be stripped from the value
            #
novo_valor = novo_valor.strip()
if self._valida(novo_valor):
self._valor_string = unicode(tirar_acentos(novo_valor))
else:
self._valor_string = ''
def get_valor(self):
return unicode(por_acentos(self._valor_string))
valor = property(get_valor, set_valor)
def __unicode__(self):
if (not self.obrigatorio) and (not self.valor):
texto = ''
else:
texto = '<%s' % self.nome
if self.namespace and self.namespace_obrigatorio:
texto += ' xmlns="%s"' % self.namespace
if self.propriedade:
texto += ' %s="%s">' % (self.propriedade, self._valor_string)
elif self.valor or (len(self.tamanho) == 3 and self.tamanho[2]):
texto += '>%s</%s>' % (self._valor_string, self.nome)
else:
texto += ' />'
return texto
def __repr__(self):
return self.__unicode__()
def get_xml(self):
return self.__unicode__()
def set_xml(self, arquivo, ocorrencia=1):
if self._le_xml(arquivo):
self.valor = self._le_tag(self.raiz + '/' + self.nome, propriedade=self.propriedade, ns=self.namespace, ocorrencia=ocorrencia)
xml = property(get_xml, set_xml)
def get_text(self):
if self.propriedade:
return '%s_%s=%s' % (self.nome, self.propriedade, self._valor_string)
else:
return '%s=%s' % (self.nome, self._valor_string)
text = property(get_text)
def get_txt(self):
if self.obrigatorio:
return self._valor_string
if self.valor:
return self._valor_string
return ''
txt = property(get_txt)
class TagBoolean(TagCaracter):
def __init__(self, **kwargs):
super(TagBoolean, self).__init__(**kwargs)
self._valor_boolean = None
        # Code to make entity instance creation dynamic,
        # applying the attribute values at instantiation time
for k, v in kwargs.items():
setattr(self, k, v)
        if 'valor' in kwargs:
self.valor = kwargs['valor']
def _testa_obrigatorio(self, valor):
        # For a boolean tag, False must be treated as filled in
if self.obrigatorio and (valor is None):
return ErroObrigatorio(self.codigo, self.nome, self.propriedade)
def _valida(self, valor):
self.alertas = []
if self._testa_obrigatorio(valor):
self.alertas.append(self._testa_obrigatorio(valor))
return self.alertas == []
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if novo_valor.lower() == 'true':
novo_valor = True
elif novo_valor.lower() == 'false':
novo_valor = False
else:
novo_valor = None
if isinstance(novo_valor, bool) and self._valida(novo_valor):
self._valor_boolean = novo_valor
            if novo_valor is None:
self._valor_string = ''
elif novo_valor:
self._valor_string = 'true'
else:
self._valor_string = 'false'
else:
self._valor_boolean = None
self._valor_string = ''
def get_valor(self):
return self._valor_boolean
valor = property(get_valor, set_valor)
def __unicode__(self):
        if (not self.obrigatorio) and (self.valor is None):
texto = ''
else:
texto = '<%s' % self.nome
if self.namespace:
texto += ' xmlns="%s"' % self.namespace
if self.propriedade:
texto += ' %s="%s">' % (self.propriedade, self._valor_string)
            elif self.valor is not None:
texto += '>%s</%s>' % (self._valor_string, self.nome)
else:
texto += ' />'
return texto
class TagData(TagCaracter):
def __init__(self, **kwargs):
super(TagData, self).__init__(**kwargs)
self._valor_data = None
        # Code to make entity instance creation dynamic,
        # applying the attribute values at instantiation time
for k, v in kwargs.items():
setattr(self, k, v)
        if 'valor' in kwargs:
self.valor = kwargs['valor']
def _valida(self, valor):
self.alertas = []
if self._testa_obrigatorio(valor):
self.alertas.append(self._testa_obrigatorio(valor))
return self.alertas == []
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if novo_valor:
novo_valor = datetime.strptime(novo_valor[:10], '%Y-%m-%d')
else:
novo_valor = None
if isinstance(novo_valor, (datetime, date,)) and self._valida(novo_valor):
self._valor_data = novo_valor
            # Careful!
            # strftime cannot be used here, because in some cases the
            # date returned is 01/01/0001 00:00:00, and strftime only
            # accepts dates with years from 1900 onwards
self._valor_string = '%04d-%02d-%02d' % (self._valor_data.year, self._valor_data.month, self._valor_data.day)
else:
self._valor_data = None
self._valor_string = ''
def get_valor(self):
return self._valor_data
valor = property(get_valor, set_valor)
def formato_danfe(self):
if self._valor_data is None:
return ''
else:
return self._valor_data.strftime('%d/%m/%Y')
class TagHora(TagData):
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if novo_valor:
novo_valor = datetime.strptime(novo_valor, '%H:%M:%S')
else:
novo_valor = None
if isinstance(novo_valor, (datetime, time,)) and self._valida(novo_valor):
self._valor_data = novo_valor
            # Careful!
            # strftime cannot be used here, because in some cases the
            # date returned is 01/01/0001 00:00:00, and strftime only
            # accepts dates with years from 1900 onwards
self._valor_string = '%02d:%02d:%02d' % (self._valor_data.hour, self._valor_data.minute, self._valor_data.second)
else:
self._valor_data = None
self._valor_string = ''
def get_valor(self):
return self._valor_data
valor = property(get_valor, set_valor)
def formato_danfe(self):
if self._valor_data is None:
return ''
else:
return self._valor_data.strftime('%H:%M:%S')
class TagDataHora(TagData):
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if novo_valor:
                #
                # Force the microseconds sent by the DF-e distribution
                # webservice to be ignored
                #
if '.' in novo_valor:
novo_valor = novo_valor.split('.')[0]
novo_valor = datetime.strptime(novo_valor, '%Y-%m-%dT%H:%M:%S')
else:
novo_valor = None
if isinstance(novo_valor, datetime) and self._valida(novo_valor):
self._valor_data = novo_valor
self._valor_data = self._valor_data.replace(microsecond=0)
            # Careful!
            # strftime cannot be used here, because in some cases the
            # date returned is 01/01/0001 00:00:00, and strftime only
            # accepts dates with years from 1900 onwards
self._valor_string = '%04d-%02d-%02dT%02d:%02d:%02d' % (self._valor_data.year, self._valor_data.month, self._valor_data.day,
self._valor_data.hour, self._valor_data.minute, self._valor_data.second)
else:
self._valor_data = None
self._valor_string = ''
def get_valor(self):
return self._valor_data
valor = property(get_valor, set_valor)
def formato_danfe(self):
if self._valor_data is None:
return ''
else:
return self._valor_data.strftime('%d/%m/%Y %H:%M:%S')
def fuso_horario_sistema():
diferenca = int(strftime('%z')) // 100
if diferenca < 0:
return pytz.timezone('Etc/GMT+' + str(diferenca * -1))
if diferenca > 0:
return pytz.timezone('Etc/GMT-' + str(diferenca))
return pytz.UTC
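# Background on the sign flip above (added here, not an original comment):
# the Etc/GMT* zones follow the POSIX convention, so Etc/GMT+3 is UTC-3:
#
#     >>> pytz.timezone('Etc/GMT+3').utcoffset(datetime(2020, 1, 1))
#     datetime.timedelta(-1, 75600)   # i.e. -3 hours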
class TagDataHoraUTC(TagData):
def __init__(self, **kwargs):
super(TagDataHoraUTC, self).__init__(**kwargs)
        #
        # Format validation expression (from the leiauteSRE_V1.00.xsd file),
        # changed to make the time zone information optional
        #
self._validacao = re.compile(r'(((20(([02468][048])|([13579][26]))-02-29))|(20[0-9][0-9])-((((0[1-9])|(1[0-2]))-((0[1-9])|(1\d)|(2[0-8])))|((((0[13578])|(1[02]))-31)|(((0[1,3-9])|(1[0-2]))-(29|30)))))T(20|21|22|23|[0-1]\d):[0-5]\d:[0-5]\d(-0[1-4]:00)?')
self._valida_fuso = re.compile(r'.*-0[0-9]:00$')
self._brasilia = pytz.timezone('America/Sao_Paulo')
self.fuso_horario = 'America/Sao_Paulo'
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if self._validacao.match(novo_valor):
if self._valida_fuso.match(novo_valor):
                    #
                    # Extract and determine the time zone that was given
                    #
self.fuso_horario = novo_valor[19:]
novo_valor = novo_valor[:19]
                #
                # Convert the date without time zone information to the
                # current time zone; this is needed because strptime
                # ignores the time zone information in the input string
                #
novo_valor = self.fuso_horario.localize(datetime.strptime(novo_valor, '%Y-%m-%dT%H:%M:%S'))
else:
novo_valor = None
if isinstance(novo_valor, datetime) and self._valida(novo_valor):
if not novo_valor.tzinfo:
novo_valor = fuso_horario_sistema().localize(novo_valor)
novo_valor = pytz.UTC.normalize(novo_valor)
novo_valor = self._brasilia.normalize(novo_valor)
self._valor_data = novo_valor
self._valor_data = self._valor_data.replace(microsecond=0)
try:
self._valor_data = self.fuso_horario.localize(self._valor_data)
            except Exception:
pass
            # Careful!
            # strftime cannot be used here, because in some cases the
            # date returned is 01/01/0001 00:00:00, and strftime only
            # accepts dates with years from 1900 onwards; hence the
            # isoformat() call below
self._valor_string = self._valor_data.isoformat()
else:
self._valor_data = None
self._valor_string = ''
def get_valor(self):
return self._valor_data
valor = property(get_valor, set_valor)
    def set_fuso_horario(self, novo_valor):
if novo_valor in pytz.country_timezones['br']:
self._fuso_horario = pytz.timezone(novo_valor)
        #
        # Note the apparent sign inversion in the values below: the
        # Etc/GMT* zones follow the POSIX convention, in which Etc/GMT+4
        # means UTC-4, so GMT+ (plus) is required here, not GMT- (minus)
        # as one might expect
        #
elif novo_valor == '-04:00' or novo_valor == '-0400':
self._fuso_horario = pytz.timezone('Etc/GMT+4')
elif novo_valor == '-03:00' or novo_valor == '-0300':
self._fuso_horario = pytz.timezone('Etc/GMT+3')
elif novo_valor == '-02:00' or novo_valor == '-0200':
self._fuso_horario = pytz.timezone('Etc/GMT+2')
elif novo_valor == '-01:00' or novo_valor == '-0100':
self._fuso_horario = pytz.timezone('Etc/GMT+1')
def get_fuso_horario(self):
return self._fuso_horario
    fuso_horario = property(get_fuso_horario, set_fuso_horario)
def formato_danfe(self):
if self._valor_data is None:
return ''
else:
valor = self._brasilia.normalize(self._valor_data).strftime('%d/%m/%Y %H:%M:%S %Z (%z)')
            #
            # Replace the abbreviations:
            # BRT - Brasília Time -> HOB - Horário Oficial de Brasília
            # BRST - Brasília Summer Time -> HVOB - Horário de Verão Oficial de Brasília
            # AMT - Amazon Time -> HOA - Horário Oficial da Amazônia
            # AMST - Amazon Summer Time -> HVOA - Horário de Verão Oficial da Amazônia
            # FNT - Fernando de Noronha Time -> HOFN - Horário Oficial de Fernando de Noronha
            #
valor = valor.replace('(-0100)', '(-01:00)')
valor = valor.replace('(-0200)', '(-02:00)')
valor = valor.replace('(-0300)', '(-03:00)')
valor = valor.replace('(-0400)', '(-04:00)')
valor = valor.replace('BRT', 'HOB')
valor = valor.replace('BRST', 'HVOB')
valor = valor.replace('AMT', 'HOA')
valor = valor.replace('AMST', 'HVOA')
valor = valor.replace('FNT', 'HOFN')
return valor
class TagInteiro(TagCaracter):
def __init__(self, **kwargs):
super(TagInteiro, self).__init__(**kwargs)
self._valor_inteiro = 0
self._valor_string = '0'
        # Code to make entity instance creation dynamic,
        # applying the attribute values at instantiation time
for k, v in kwargs.items():
setattr(self, k, v)
        if 'valor' in kwargs:
self.valor = kwargs['valor']
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if novo_valor:
novo_valor = int(novo_valor)
else:
novo_valor = 0
if isinstance(novo_valor, (int, long, Decimal)) and self._valida(novo_valor):
self._valor_inteiro = novo_valor
self._valor_string = unicode(self._valor_inteiro)
if (len(self.tamanho) >= 3) and self.tamanho[2] and (len(self._valor_string) < self.tamanho[2]):
self._valor_string = self._valor_string.rjust(self.tamanho[2], '0')
else:
self._valor_inteiro = 0
self._valor_string = '0'
def get_valor(self):
return self._valor_inteiro
valor = property(get_valor, set_valor)
def formato_danfe(self):
if not (self.obrigatorio or self._valor_inteiro):
return ''
return locale.format('%d', self._valor_inteiro, grouping=True)
class TagDecimal(TagCaracter):
def __init__(self, *args, **kwargs):
self._valor_decimal = Decimal('0.0')
self._valor_string = '0.0'
self.decimais = [None, None, None]
super(TagDecimal, self).__init__(*args, **kwargs)
self._valor_decimal = Decimal('0.0')
self._valor_string = self._formata(self._valor_decimal)
self.decimais = [None, None, None]
        # Code to make entity instance creation dynamic,
        # applying the attribute values at instantiation time
for k, v in kwargs.items():
setattr(self, k, v)
def _parte_inteira(self, valor=None):
if valor is None:
valor = self._valor_decimal
valor = unicode(valor).strip()
if '.' in valor:
valor = valor.split('.')[0]
return valor
def _parte_decimal(self, valor=None):
if valor is None:
valor = self._valor_decimal
valor = unicode(valor).strip()
if '.' in valor:
valor = valor.split('.')[1]
else:
valor = ''
return valor
def _formata(self, valor):
texto = self._parte_inteira(valor)
dec = self._parte_decimal(valor)
if not dec:
dec = '0'
        # Minimum number of decimal places
if (len(self.decimais) >= 3) and self.decimais[2] and (len(dec) < self.decimais[2]):
dec = dec.ljust(self.decimais[2], '0')
texto += '.' + dec
return texto
def _testa_decimais_minimo(self, decimal):
if self.decimais[0] and (len(decimal) < self.decimais[0]):
            return TamanhoInvalido(self.codigo, self.nome, decimal, dec_min=self.decimais[0])
def _testa_decimais_maximo(self, decimal):
if self.decimais[1] and (len(decimal) > self.decimais[1]):
            return TamanhoInvalido(self.codigo, self.nome, decimal, dec_max=self.decimais[1])
def _valida(self, valor):
self.alertas = []
if self._testa_obrigatorio(valor):
self.alertas.append(self._testa_obrigatorio(valor))
inteiro = self._parte_inteira(valor)
decimal = self._parte_decimal(valor)
if self._testa_tamanho_minimo(inteiro):
self.alertas.append(self._testa_tamanho_minimo(inteiro))
if self._testa_tamanho_maximo(inteiro):
self.alertas.append(self._testa_tamanho_maximo(inteiro))
        #
        # Looking at the validation regexes for the tags with decimals,
        # there seems to be a maximum number of decimal places, but the
        # tags may be sent without any decimal places at all, so there
        # is no minimum number of decimal places
        #
#if self._testa_decimais_minimo(decimal):
# self.alertas.append(self._testa_decimais_minimo(decimal))
if self._testa_decimais_maximo(decimal):
self.alertas.append(self._testa_decimais_maximo(decimal))
return self.alertas == []
def set_valor(self, novo_valor):
if isinstance(novo_valor, basestring):
if novo_valor:
novo_valor = Decimal(novo_valor)
else:
novo_valor = Decimal('0.0')
if isinstance(novo_valor, (int, long, Decimal)) and self._valida(novo_valor):
self._valor_decimal = Decimal(novo_valor)
self._valor_string = self._formata(self._valor_decimal)
else:
self._valor_decimal = Decimal('0.0')
self._valor_string = self._formata(self._valor_decimal)
def get_valor(self):
return self._valor_decimal
valor = property(get_valor, set_valor)
def formato_danfe(self):
if not (self.obrigatorio or self._valor_decimal):
return ''
        # Minimum number of decimal places
if (len(self.decimais) >= 3) and self.decimais[2]:
if len(self._parte_decimal()) <= self.decimais[2]:
formato = '%.' + unicode(self.decimais[2]) + 'f'
else:
formato = '%.' + unicode(len(self._parte_decimal())) + 'f'
else:
formato = '%.2f'
return locale.format(formato, self._valor_decimal, grouping=True)
class XMLNFe(NohXML):
def __init__(self, *args, **kwargs):
super(XMLNFe, self).__init__(*args, **kwargs)
self._xml = None
self.alertas = []
self.arquivo_esquema = None
self.caminho_esquema = None
def get_xml(self):
self.alertas = []
return ''
def validar(self):
arquivo_esquema = self.caminho_esquema + self.arquivo_esquema
        # It is important to strip the encoding declaration here
        # to avoid unicode-to-ascii conversion errors
xml = tira_abertura(self.xml).encode('utf-8')
esquema = etree.XMLSchema(etree.parse(arquivo_esquema))
esquema.validate(etree.fromstring(xml))
namespace = '{http://www.portalfiscal.inf.br/nfe}'
return "\n".join([x.message.replace(namespace, '') for x in esquema.error_log])
def le_grupo(self, raiz_grupo, classe_grupo, sigla_ns='nfe'):
tags = []
grupos = self._le_nohs(raiz_grupo, sigla_ns=sigla_ns)
if grupos is not None:
tags = [classe_grupo() for g in grupos]
for i in range(len(grupos)):
tags[i].xml = grupos[i]
return tags
def tirar_acentos(texto):
if not texto:
return texto
    texto = texto.replace('&', '&amp;')
    texto = texto.replace('<', '&lt;')
    texto = texto.replace('>', '&gt;')
    texto = texto.replace('"', '&quot;')
    texto = texto.replace("'", '&apos;')
#
# Trocar ENTER e TAB
#
texto = texto.replace('\t', ' ')
texto = texto.replace('\n', '| ')
# Remove espaços seguidos
# Nem pergunte...
while ' ' in texto:
texto = texto.replace(' ', ' ')
return texto
def por_acentos(texto):
if not texto:
return texto
    texto = texto.replace('&#39;', "'")
    texto = texto.replace('&apos;', "'")
    texto = texto.replace('&quot;', '"')
    texto = texto.replace('&gt;', '>')
    texto = texto.replace('&lt;', '<')
    texto = texto.replace('&amp;', '&')
    texto = texto.replace('&APOS;', "'")
    texto = texto.replace('&QUOT;', '"')
    texto = texto.replace('&GT;', '>')
    texto = texto.replace('&LT;', '<')
    texto = texto.replace('&AMP;', '&')
return texto
def tira_abertura(texto):
#aberturas = (
#'<?xml version="1.0" encoding="utf-8"?>',
#'<?xml version="1.0" encoding="utf-8" ?>',
#'<?xml version="1.0" encoding="utf-8" standalone="no"?>',
#'<?xml version="1.0" encoding="utf-8" standalone="no" ?>',
#'<?xml version="1.0" encoding="utf-8" standalone="yes"?>',
#'<?xml version="1.0" encoding="utf-8" standalone="yes" ?>',
#'<?xml version="1.0" encoding="UTF-8"?>',
#'<?xml version="1.0" encoding="UTF-8" ?>',
#'<?xml version="1.0" encoding="UTF-8" standalone="no"?>',
#'<?xml version="1.0" encoding="UTF-8" standalone="no" ?>',
#'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
#'<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>',
#"<?xml version='1.0' encoding='utf-8'?>",
#"<?xml version='1.0' encoding='utf-8' ?>",
#"<?xml version='1.0' encoding='utf-8' standalone='no'?>",
#"<?xml version='1.0' encoding='utf-8' standalone='no' ?>",
#"<?xml version='1.0' encoding='utf-8' standalone='yes'?>",
#"<?xml version='1.0' encoding='utf-8' standalone='yes' ?>",
#"<?xml version='1.0' encoding='UTF-8'?>",
#"<?xml version='1.0' encoding='UTF-8' ?>",
#"<?xml version='1.0' encoding='UTF-8' standalone='no'?>",
#"<?xml version='1.0' encoding='UTF-8' standalone='no' ?>",
#"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>",
#"<?xml version='1.0' encoding='UTF-8' standalone='yes' ?>",
#)
#for a in aberturas:
#texto = texto.replace(a, '')
if '?>' in texto:
texto = texto.split('?>')[1:]
texto = ''.join(texto)
return texto
def _tipo_para_string(valor, tipo, obrigatorio, dec_min):
if (not obrigatorio) and (not valor):
return '', ''
decimais = ''
    # Careful!!!
    # We can't use strftime here, because in some cases the
    # date returned is 01/01/0001 00:00:00 and strftime only
    # accepts dates with years from 1900 onwards
if (tipo in ('d', 'h', 'dh')) and isinstance(valor, (datetime, date, time,)):
valor = formata_datahora(valor, tipo)
elif (tipo == 'n') and isinstance(valor, (int, long, float, Decimal)):
if isinstance(valor, (int, long, float)):
valor = Decimal(unicode(valor))
valor = unicode(valor).strip()
if '.' in valor:
decimais = valor.split('.')[1]
if dec_min:
decimais = decimais.ljust(dec_min, '0')
if '.' in valor:
valor = valor.split('.')[0]
valor += '.' + decimais
return valor, decimais
def _string_para_tipo(valor, tipo):
if valor == None:
return valor
if tipo == 'd':
valor = datetime.strptime(valor, b'%Y-%m-%d')
elif tipo == 'h':
valor = datetime.strptime(valor, b'%H:%M:%S')
elif tipo == 'dh':
valor = datetime.strptime(valor, b'%Y-%m-%dT%H:%M:%S')
elif tipo == 'n':
valor = Decimal(valor)
return valor
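# Editor's sketch (not part of the original module): a quick round trip
# through the two helpers above, relying on the module's own datetime/Decimal
# imports. _tipo_para_string pads the decimal part up to dec_min, while
# _string_para_tipo parses the ISO-style strings back into Python types.
def _exemplo_conversao():
    valor, decimais = _tipo_para_string(Decimal('123.4'), 'n', True, 2)
    assert (valor, decimais) == ('123.40', '40')
    assert _string_para_tipo('2014-05-01', 'd') == datetime(2014, 5, 1)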
def formata_datahora(valor, tipo):
if (tipo == 'd') and isinstance(valor, (datetime, date,)):
valor = '%04d-%02d-%02d' % (valor.year, valor.month, valor.day)
elif (tipo == 'h') and isinstance(valor, (datetime, time,)):
valor = '%02d:%02d:%02d' % (valor.hour, valor.minute, valor.second)
valor = valor.strftime('%H:%M:%S')
elif (tipo == 'dh') and isinstance(valor, datetime):
valor = '%04d-%02d-%02dT%02d:%02d:%02d' % (valor.year, valor.month, valor.day, valor.hour, valor.minute, valor.second)
return valor
def somente_ascii(funcao):
    '''
    Used as a decorator for the electronic service invoice
    (nota fiscal eletrônica de serviços)
    '''
def converter_para_ascii_puro(*args, **kwargs):
return unicodedata.normalize(b'NFKD', funcao(*args, **kwargs)).encode('ascii', 'ignore')
return converter_para_ascii_puro
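# Editor's sketch (not part of the original module): despite their names,
# tirar_acentos/por_acentos are escape/unescape counterparts for XML entities
# (plus whitespace normalization on the way in), so they round-trip:
def _exemplo_entidades():
    escapado = tirar_acentos('a & b < c')
    assert escapado == 'a &amp; b &lt; c'
    assert por_acentos(escapado) == 'a & b < c'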
|
rodrigoasmacedo/PySPED
|
pysped/xml_sped/base.py
|
Python
|
lgpl-2.1
| 35,708
|
import Tkinter
import ttk
import ttkstyles
import periodic
class QuitButton(ttk.Button):
"""docstring for QuitButton"""
def __init__(self, parent):
ttk.Button.__init__(
self,
parent,
text="Quit",
style="Quit.TButton"
)
class ElementLabel(ttk.Label):
"""docstring for ElementLabel"""
def __init__(self, parent, element):
ttk.Label.__init__(
self,
parent,
style="Element.TLabel")
ttk.Label(
self,
text=element["name"],
style="Name.Element.TLabel"
).pack()
ttk.Label(
self,
text=element["atomic_no"],
style="AtomicNo.Element.TLabel"
).pack()
ttk.Label(
self,
text=element["symbol"],
style="Symbol.Element.TLabel"
).pack(side=Tkinter.TOP)
ttk.Label(
self,
text=element["atomic_wt"],
style="AtomicWt.Element.TLabel"
).pack()
class Application(ttk.Frame):
"""docstring for Application"""
def __init__(self, parent):
ttk.Frame.__init__(self, parent)
ttkstyles.load_styles()
self.pack()
self.create_widgets()
def create_widgets(self):
def print_word(event):
self.word_holder2.destroy()
self.word_holder2 = ttk.Frame(self.word_holder)
self.word_holder2.pack()
elements = periodic.get_periodics(self.word.get())
for match in elements:
holder = ttk.Label(self.word_holder2)
for el in match:
ElementLabel(
holder, el
).pack(side=Tkinter.LEFT, padx=5, pady=5)
holder.pack()
self.word = Tkinter.StringVar()
entry = ttk.Entry(self, textvariable=self.word)
entry.bind("<KeyRelease>", print_word)
entry.pack()
self.word_holder = ttk.Frame(self)
self.word_holder.pack()
self.word_holder2 = ttk.Frame(self.word_holder)
self.word_holder2.pack()
if __name__ == "__main__":
root = Tkinter.Tk()
root.geometry(
"".join(
[
str(root.winfo_screenwidth()), "x",
str(root.winfo_screenheight()),
"+0+0"
]
)
)
app = Application(parent=root)
root.mainloop()
|
ktbartolotta/periodic
|
main.py
|
Python
|
mit
| 2,480
|
from celery.task import task
@task(name="c.unittest.SomeAppTask")
def SomeAppTask(**kwargs):
return 42
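# Editor's sketch (not part of the original file): typical call sites for the
# task above, assuming a configured Celery broker and result backend.
def call_someapp_task():
    result = SomeAppTask.delay()   # enqueue for a worker to pick up
    return result.get(timeout=10)  # 42, once the worker has run it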
|
mzdaniel/oh-mainline
|
vendor/packages/django-celery/tests/someapp/tasks.py
|
Python
|
agpl-3.0
| 109
|
class Solution(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
n = len(nums)
        # start: first index of the current increasing run; pre: previous
        # index; theMax: best run length found so far, minus one
        start = pre = theMax = 0
        for i in range(1, n):
            if nums[i] > nums[pre]:  # run keeps strictly increasing
                if i - start > theMax:
                    theMax = i - start
                pre = i
            else:  # run broken: restart at i
                start = pre = i
return theMax + 1
print(Solution().findLengthOfLCIS([2,2,2,2,2]))
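# Editor's note: two more quick checks (expected values follow from the
# strictly-increasing rule implemented above).
print(Solution().findLengthOfLCIS([1, 3, 5, 4, 7]))  # 3 -- the run [1, 3, 5]
print(Solution().findLengthOfLCIS([]))               # 0 -- empty input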
|
wufangjie/leetcode
|
674. Longest Continuous Increasing Subsequence.py
|
Python
|
gpl-3.0
| 529
|
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import periodic_task
from oslo_service import threadgroup
from mistral import context as auth_ctx
from mistral.db.v2 import api as db_api_v2
from mistral import exceptions as exc
from mistral.rpc import clients as rpc
from mistral.services import security
from mistral.services import triggers
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# {periodic_task: thread_group}
_periodic_tasks = {}
def process_cron_triggers_v2(self, ctx):
LOG.debug("Processing cron triggers...")
for trigger in triggers.get_next_cron_triggers():
LOG.debug("Processing cron trigger: %s", trigger)
try:
# Setup admin context before schedule triggers.
ctx = security.create_context(
trigger.trust_id,
trigger.project_id
)
auth_ctx.set_ctx(ctx)
LOG.debug("Cron trigger security context: %s", ctx)
# Try to advance the cron trigger next_execution_time and
# remaining_executions if relevant.
modified = advance_cron_trigger(trigger)
# If cron trigger was not already modified by another engine.
if modified:
LOG.debug(
"Starting workflow '%s' by cron trigger '%s'",
trigger.workflow.name,
trigger.name
)
description = {
"description": (
"Workflow execution created by cron"
" trigger '(%s)'." % trigger.id
),
"triggered_by": {
"type": "cron_trigger",
"id": trigger.id,
"name": trigger.name,
}
}
rpc.get_engine_client().start_workflow(
trigger.workflow.name,
trigger.workflow.namespace,
None,
trigger.workflow_input,
description=json.dumps(description),
**trigger.workflow_params
)
except Exception:
# Log and continue to next cron trigger.
LOG.exception(
"Failed to process cron trigger %s",
str(trigger)
)
finally:
auth_ctx.set_ctx(None)
class MistralPeriodicTasks(periodic_task.PeriodicTasks):
def __init__(self, conf):
super(MistralPeriodicTasks, self).__init__(conf)
periodic_task_ = periodic_task.periodic_task(
spacing=CONF.cron_trigger.execution_interval,
run_immediately=True,
)
self.add_periodic_task(periodic_task_(process_cron_triggers_v2))
def advance_cron_trigger(t):
modified_count = 0
try:
# If the cron trigger is defined with limited execution count.
if t.remaining_executions is not None and t.remaining_executions > 0:
t.remaining_executions -= 1
# If this is the last execution.
if t.remaining_executions == 0:
modified_count = triggers.delete_cron_trigger(
t.name,
trust_id=t.trust_id,
delete_trust=False
)
else: # if remaining execution = None or > 0.
next_time = triggers.get_next_execution_time(
t.pattern,
t.next_execution_time
)
# Update the cron trigger with next execution details
# only if it wasn't already updated by a different process.
updated, modified_count = db_api_v2.update_cron_trigger(
t.name,
{
'next_execution_time': next_time,
'remaining_executions': t.remaining_executions
},
query_filter={
'next_execution_time': t.next_execution_time
}
)
except exc.DBEntityNotFoundError as e:
# Cron trigger was probably already deleted by a different process.
LOG.debug(
"Cron trigger named '%s' does not exist anymore: %s",
t.name, str(e)
)
# Return True if this engine was able to modify the cron trigger in DB.
return modified_count > 0
def setup():
tg = threadgroup.ThreadGroup()
pt = MistralPeriodicTasks(CONF)
ctx = auth_ctx.MistralContext(
user=None,
tenant=None,
auth_token=None,
is_admin=True
)
tg.add_dynamic_timer(
pt.run_periodic_tasks,
initial_delay=None,
periodic_interval_max=1,
context=ctx
)
_periodic_tasks[pt] = tg
return tg
def stop_all_periodic_tasks():
for tg in _periodic_tasks.values():
tg.stop()
_periodic_tasks.clear()
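# Editor's sketch (not part of the original module): the expected lifecycle,
# assuming oslo.service configuration has already been loaded.
def example_lifecycle():
    tg = setup()                 # registers and starts the cron-trigger task
    # ... service runs ...
    stop_all_periodic_tasks()    # stops every registered thread group
    tg.wait()                    # oslo ThreadGroup: block until threads exit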
|
StackStorm/mistral
|
mistral/services/periodic.py
|
Python
|
apache-2.0
| 5,546
|
import xmlrpclib
import urllib2
import json
import datetime
import functools
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.conf import settings
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from powwow.apps.models import AppSettings
def index(request):
return render_to_response('app.xml', {
'url_static': settings.STATIC_URL,
'app_url': settings.APP_URL,
})
def index_dev(request):
return render_to_response('app_dev.xml')
def local_dev(request):
return render_to_response('app_dev.html')
@csrf_exempt
def confluence(request):
spacekey = AppSettings.objects.get(name='conf_space')
pagetitle = AppSettings.objects.get(name='conf_page')
server = xmlrpclib.ServerProxy(settings.CONFLUENCE_API)
token = server.confluence1.login(
settings.CONFLUENCE_USER,
settings.CONFLUENCE_PASS
)
page = server.confluence1.getPage(
token,
spacekey.content,
pagetitle.content
)
if page is None:
response_text = "Could not find page %s:%s" % (spacekey, pagetitle)
return add_cors_headers(HttpResponse(response_text))
if request.method == 'POST':
for key,value in request.POST.iteritems():
if key == 'notes':
page['content'] = value
server.confluence1.storePage(token, page)
return add_cors_headers(HttpResponse("Saved"))
params = {'content': page['content']}
response = render_to_response('confluence.html', params)
response = add_cors_headers(response)
return response
def jira(request):
project = AppSettings.objects.get(name='jira_project')
session = jira_login()
#TODO the number of days back should be configurable
date = datetime.datetime.now() - datetime.timedelta(days=7)
date = date.strftime("%Y-%m-%d")
jql = "project = %s AND updated >= %s order by updated" %(
project.content, date
)
url = ("%s/search?jql=%s&startAt=0&maxResults=15&JSESSIONID=%s"
% (settings.JIRA_API, urllib2.quote(jql), session["JSESSIONID"]))
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'JSESSIONID=%s'
% session["JSESSIONID"]))
response = opener.open(url)
issues = json.loads(response.read())
issues_details = [
jira_issue(issue["key"], session) for issue in issues.get("issues")
]
response = render_to_response('jira.html', {'issues': issues_details})
response = add_cors_headers(response)
return response
@csrf_exempt
def jira_find_issue(request):
if request.method == 'POST':
project = AppSettings.objects.get(name='jira_project')
for key,value in request.POST.iteritems():
if key == 'issue':
issue = value.strip()
issue_info = jira_issue(project.content + '-' + issue)
if issue_info is None:
return add_cors_headers(HttpResponse("The issue you are looking \
for does not exist in the current project."))
response = render_to_response('jira_issue.html', {'issue': issue_info})
response = add_cors_headers(response)
return response
else:
return add_cors_headers(HttpResponse("You did not send any value to \
search for."))
def jira_issue(issue_id, session=None):
if session is None:
session = jira_login()
url = "%s/issue/%s" % (settings.JIRA_API, issue_id)
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'JSESSIONID=%s'
% session["JSESSIONID"]))
try:
response = opener.open(url)
except Exception:
return None
issue = json.loads(response.read())
issue["browse_url"] = "%s/%s" %(settings.JIRA_BROWSE_URL,issue.get("key"))
return issue
def jira_login():
url = "%s/session" % settings.JIRA_AUTH
values = {"username": settings.JIRA_USER, "password": settings.JIRA_PASS}
data = json.dumps(values)
headers = {'Content-type': 'application/json'}
req = urllib2.Request(url, data, headers)
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError, e:
return {}
session = json.loads(response.read())
return { session["session"]["name"]: session["session"]["value"] }
def github(request):
project = AppSettings.objects.get(name='github_project')
url = '%s/repos/%s/%s/commits' % (
settings.GITHUB_API, settings.GITHUB_USER, project.content
)
opener = urllib2.build_opener()
try:
res = opener.open(url)
except Exception:
return add_cors_headers(HttpResponse("Error trying to access GitHub!"))
commits = json.loads(res.read())
params = {'commits': commits, 'project': project.content,
'user':settings.GITHUB_USER}
response = render_to_response('github.html', params)
response = add_cors_headers(response)
return response
def add_cors_headers(response):
response['Access-Control-Allow-Origin'] = settings.ALLOWED_ORIGIN
response['Access-Control-Allow-Methods'] = 'POST, GET'
return response
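# Editor's sketch (not part of the original module): any new endpoint served
# to the app should route its response through add_cors_headers, as the views
# above do; 'ping' here is a hypothetical example view.
def ping(request):
    return add_cors_headers(HttpResponse("pong"))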
|
pbs/powwow
|
powwow/apps/views.py
|
Python
|
bsd-3-clause
| 5,246
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.mininode import *
from test_framework.test_framework import nealcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
direct_fetch_response_time = 0.05
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, conn, message):
self.last_headers = message
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_close(self, conn):
self.disconnected = True
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(nealcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders(timeout=5)
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[ inv_node.send_block_inv(x.sha256) for x in blocks ]
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
print("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
            test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
            test_node.last_getheaders = True
print("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
|
appop/bitcoin
|
qa/rpc-tests/sendheaders.py
|
Python
|
mit
| 25,711
|
import sqlite3
class Point:
def __init__(self, x, y):
self.x, self.y = x, y
def adapt_point(point):
return "%f;%f" % (point.x, point.y)
sqlite3.register_adapter(Point, adapt_point)
con = sqlite3.connect(":memory:")
cur = con.cursor()
p = Point(4.0, -3.2)
cur.execute("select ?", (p,))
print(cur.fetchone()[0])
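# Editor's sketch (not part of the original example): the converter is the
# inverse of the adapter above. With detect_types=PARSE_DECLTYPES, sqlite3
# applies it to any column declared with the "point" type.
def convert_point(s):
    x, y = map(float, s.split(b";"))
    return Point(x, y)

sqlite3.register_converter("point", convert_point)
con2 = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
cur2 = con2.cursor()
cur2.execute("create table test(p point)")
cur2.execute("insert into test(p) values (?)", (Point(4.0, -3.2),))
cur2.execute("select p from test")
q = cur2.fetchone()[0]
print((q.x, q.y))  # (4.0, -3.2)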
|
837468220/python-for-android
|
python3-alpha/python3-src/Doc/includes/sqlite3/adapter_point_2.py
|
Python
|
apache-2.0
| 331
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 tw=0 noet :
#
# Document: dims-training-manual
# This documentation build configuration file was created from
# a cookiecutter template. It is based on output derived from
# the output of sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from sphinx import __version__
# ReadTheDocs configuration setting:
on_rtd = os.environ.get('READTHEDOCS') == "True"
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DIMS Training Manual'
copyright = u'2014-2016, University of Washington'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.2'
# The full version, including alpha/beta/rc tags.
release = '0.3.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
if not on_rtd and os.environ.get('INCLUDETODOS') == "True":
todo_include_todos = True
else:
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
html_logo = 'UW-logo.png'
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
html_favicon = 'UW-logo-32x32.ico'
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'dims-training-manualdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# The following comes from
# https://github.com/rtfd/readthedocs.org/issues/416
# and http://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472&names=-
#
'preamble': "".join((
        r'\usepackage{pifont}', # To get Dingbats
        r'\DeclareUnicodeCharacter{00A0}{ }', # NO-BREAK SPACE
        r'\DeclareUnicodeCharacter{2014}{\dash}', # LONG DASH
        r'\DeclareUnicodeCharacter{251C}{+}', # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
        r'\DeclareUnicodeCharacter{2514}{+}', # BOX DRAWINGS LIGHT UP AND RIGHT
        r'\DeclareUnicodeCharacter{1F37A}{ }', # Beer emoji (just turn into space for now)
        r'\DeclareUnicodeCharacter{2588}{\textblock}', # SOLID TEXT BLOCK
        r'\DeclareUnicodeCharacter{25CF}{\ding{108}}', # Dingbat 108 (black circle)
)),
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc,
'dims-training-manual.tex',
u'DIMS Training Manual Documentation',
u'Dave Dittrich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'UW-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = ['appendices']
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc,
'dims-training-manual',
u'DIMS Training Manual Documentation',
[u'Dave Dittrich'],
1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc,
'dims-training-manual',
u'DIMS Training Manual Documentation',
'Dave Dittrich',
'dims-training-manual',
'DIMS Training Manual',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
git_branch = os.environ.get('GITBRANCH', "develop")
git_tag = os.environ.get('GITTAG', "latest")
#os.environ['DOCSURL'] = "file://{}".format(os.environ.get('GIT'))
if os.environ.get('DOCSURL') is None:
if not on_rtd:
os.environ['DOCSURL'] = "http://app.devops.develop:8080/docs/{}/html".format(git_branch)
intersphinx_cache_limit = -1 # days to keep the cached inventories (negative == forever)
if on_rtd:
intersphinx_mapping = {
'dimsusermanual': ("https://dims-user-manual.readthedocs.io/en/{0}/".format(git_tag), None),
'dimsjds': ("https://dims-jds.readthedocs.io/en/{0}/".format(git_tag), None),
}
else:
intersphinx_mapping = {
'dimsusermanual': ("{}/dims-ocd".format(os.environ['DOCSURL']), None),
'dimsjds': ("{}/dims-ocd".format(os.environ['DOCSURL']), None),
}
|
uw-dims/dims-training-manual
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 11,416
|
# util/_collections.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
import sys
import itertools
import weakref
import operator
from langhelpers import symbol
from compat import time_func, threading
EMPTY_SET = frozenset()
class NamedTuple(tuple):
"""tuple() subclass that adds labeled names.
Is also pickleable.
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
if labels:
t.__dict__.update(zip(labels, vals))
t._labels = labels
return t
def keys(self):
return [l for l in self._labels if l is not None]
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
def __init__(self, data):
self.__dict__['_data'] = data
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter([self[key] for key in self._list])
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
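# Editor's sketch (not part of the original module): unlike a plain dict on
# the Python 2 interpreters this targets, keys come back in insertion order.
def _example_ordered_dict():
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    return d.keys()  # ['b', 'a']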
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [ a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [ a for a in self._list if a in self]
self._list += [ a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [ a for a in self._list if a in self]
return self
__isub__ = difference_update
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
This strategy has edge cases for builtin types- it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = set
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools.ifilterfalse(other._members.__contains__,
self._members.iterkeys()):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools.ifilterfalse(self._members.__contains__,
other._members.iterkeys()):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._member_id_tuples()).union(_iter_id(iterable)))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._member_id_tuples()).difference(_iter_id(iterable)))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._member_id_tuples()).intersection(_iter_id(iterable)))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
result._members.update(
self._working_set(self._member_id_tuples()).symmetric_difference(_iter_id(iterable)))
return result
def _member_id_tuples(self):
return ((id(v), v) for v in self._members.itervalues())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
        self.symmetric_difference_update(other)
return self
def copy(self):
return type(self)(self._members.itervalues())
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return self._members.itervalues()
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._members.values())
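# Usage sketch: identity (not equality) decides membership, so two equal but
# distinct objects are both kept, and unhashable objects are accepted:
# >>> a, b = [1, 2], [1, 2]
# >>> s = IdentitySet([a, b, a])
# >>> len(s)
# 2
# >>> a in s, [1, 2] in s
# (True, False)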
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
if sys.version_info >= (2, 5):
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
else:
class PopulateDict(dict):
"""A dict which populates missing values via a creation function."""
def __init__(self, creator):
self.creator = creator
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = value = self.creator(key)
return value
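# Usage sketch for PopulateDict: unlike collections.defaultdict, the creation
# function receives the missing key itself:
# >>> d = PopulateDict(lambda key: key * 2)
# >>> d['ab']
# 'abab'
# >>> d
# {'ab': 'abab'}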
# define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
seen = {}
if not hashfunc:
return [x for x in seq
if x not in seen
and not seen.__setitem__(x, True)]
else:
return [x for x in seq
if hashfunc(x) not in seen
and not seen.__setitem__(hashfunc(x), True)]
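# Usage sketch for unique_list: order-preserving de-duplication, optionally
# keyed through a hash function:
# >>> unique_list([3, 1, 3, 2, 1])
# [3, 1, 2]
# >>> unique_list(['a', 'B', 'A'], hashfunc=str.lower)
# ['a', 'B']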
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
    Additional appends() of the same object are ignored. Membership is
    determined by identity (``is``), not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
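# Usage sketch for UniqueAppender over a plain list; a set would be driven
# through add() instead, and any other collection through the named 'via'
# method:
# >>> target = []
# >>> appender = UniqueAppender(target)
# >>> item = object()
# >>> appender.append(item); appender.append(item)
# >>> len(target)
# 1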
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
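# Usage sketch for update_copy: the source dict is left untouched:
# >>> base = {'a': 1}
# >>> sorted(update_copy(base, {'b': 2}, c=3).items())
# [('a', 1), ('b', 2), ('c', 3)]
# >>> base
# {'a': 1}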
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, basestring) and hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
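# Usage sketch for flatten_iterator: nested iterables are flattened, while
# strings are treated as atoms rather than iterated character by character:
# >>> list(flatten_iterator([1, [2, [3, 'ab']], 'cd']))
# [1, 2, 3, 'ab', 'cd']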
class WeakIdentityMapping(weakref.WeakKeyDictionary):
"""A WeakKeyDictionary with an object identity index.
Adds a .by_id dictionary to a regular WeakKeyDictionary. Trades
performance during mutation operations for accelerated lookups by id().
The usual cautions about weak dictionaries and iteration also apply to
this subclass.
"""
_none = symbol('none')
def __init__(self):
weakref.WeakKeyDictionary.__init__(self)
self.by_id = {}
self._weakrefs = {}
def __setitem__(self, object, value):
oid = id(object)
self.by_id[oid] = value
if oid not in self._weakrefs:
self._weakrefs[oid] = self._ref(object)
weakref.WeakKeyDictionary.__setitem__(self, object, value)
def __delitem__(self, object):
del self._weakrefs[id(object)]
del self.by_id[id(object)]
weakref.WeakKeyDictionary.__delitem__(self, object)
def setdefault(self, object, default=None):
value = weakref.WeakKeyDictionary.setdefault(self, object, default)
oid = id(object)
if value is default:
self.by_id[oid] = default
if oid not in self._weakrefs:
self._weakrefs[oid] = self._ref(object)
return value
def pop(self, object, default=_none):
if default is self._none:
value = weakref.WeakKeyDictionary.pop(self, object)
else:
value = weakref.WeakKeyDictionary.pop(self, object, default)
if id(object) in self.by_id:
del self._weakrefs[id(object)]
del self.by_id[id(object)]
return value
def popitem(self):
item = weakref.WeakKeyDictionary.popitem(self)
oid = id(item[0])
del self._weakrefs[oid]
del self.by_id[oid]
return item
def clear(self):
# Py2K
# in 3k, MutableMapping calls popitem()
self._weakrefs.clear()
self.by_id.clear()
# end Py2K
weakref.WeakKeyDictionary.clear(self)
def update(self, *a, **kw):
raise NotImplementedError
def _cleanup(self, wr, key=None):
if key is None:
key = wr.key
try:
del self._weakrefs[key]
except (KeyError, AttributeError): # pragma: no cover
pass # pragma: no cover
try:
del self.by_id[key]
except (KeyError, AttributeError): # pragma: no cover
pass # pragma: no cover
class _keyed_weakref(weakref.ref):
def __init__(self, object, callback):
weakref.ref.__init__(self, object, callback)
self.key = id(object)
def _ref(self, object):
return self._keyed_weakref(object, self._cleanup)
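# Usage sketch for WeakIdentityMapping: the .by_id index gives lookups keyed
# on id() alongside the normal weak-key access:
# >>> class Thing(object): pass
# >>> thing = Thing()
# >>> mapping = WeakIdentityMapping()
# >>> mapping[thing] = 'value'
# >>> mapping.by_id[id(thing)]
# 'value'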
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
"""
def __init__(self, capacity=100, threshold=.5):
self.capacity = capacity
self.threshold = threshold
self._counter = 0
def _inc_counter(self):
self._counter += 1
return self._counter
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
by_counter = sorted(dict.values(self),
key=operator.itemgetter(2),
reverse=True)
for item in by_counter[self.capacity:]:
try:
del self[item[0]]
except KeyError:
                    # if we couldn't find a key, most
                    # likely some other thread broke in
                    # on us. loop around and try again
break
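# Usage sketch for LRUCache: the cache is pruned back down to `capacity`
# entries once it grows past capacity * (1 + threshold), dropping the least
# recently used items first:
# >>> cache = LRUCache(capacity=2, threshold=.5)
# >>> for k in 'abcd':
# ...     cache[k] = k.upper()
# >>> len(cache) <= 2, 'd' in cache
# (True, True)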
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value forthe current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
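# Usage sketch for ScopedRegistry: one lazily-created object per scope key; a
# constant scopefunc collapses everything into a single scope:
# >>> registry = ScopedRegistry(createfunc=list, scopefunc=lambda: 'only')
# >>> registry() is registry()
# True
# >>> registry.has()
# True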
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
|
SohKai/ChronoLogger
|
web/flask/lib/python2.7/site-packages/sqlalchemy/util/_collections.py
|
Python
|
mit
| 25,079
|
import mimetypes
import os
import logging
import urlparse
from django.conf.urls import url
import json
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseNotFound, HttpResponseRedirect
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils import simplejson
from django.utils.encoding import smart_str
from nginx_signing.signing import UriSigner
from sendfile import sendfile
from dss import settings
from spa.models.mix import Mix
from utils import here
logger = logging.getLogger('spa')
class AudioHandler(object):
@property
def urls(self):
pattern_list = [
url(r'^stream/(?P<mix_id>\d+)/$', 'spa.audio.start_streaming', name='audio_start_streaming'),
url(r'^download/(?P<mix_id>\d+)/$', 'spa.audio.download', name='audio_download'),
]
return pattern_list
def download(request, mix_id):
try:
if not request.user.is_authenticated():
return HttpResponseForbidden("Get tae fuck!!!")
mix = Mix.objects.get(pk=mix_id)
if mix is not None:
if mix.download_allowed:
response = {
'url': '',
'filename': smart_str('Deep South Sounds - %s.%s' % (mix.title, mix.filetype)),
'mime_type': 'application/octet-stream'
}
if mix.archive_path in [None, '']:
audio_file = mix.get_absolute_path()
if os.path.exists(audio_file):
response['url'] = audio_file.name
else:
response['url'] = mix.archive_path
return HttpResponse(json.dumps(response))
else:
return HttpResponse('Downloads not allowed for this mix', status=401)
except Exception, ex:
print ex
raise Http404("Mix not found")
def start_streaming(request, mix_id):
logger.debug('Start streaming called: %s' % mix_id)
try:
mix = Mix.objects.get(pk=mix_id)
if mix is not None:
mix.add_play(request.user)
# logger.debug('Found the mix (old method): %s' % mix.uid)
logger.debug('Found the mix (new method) %s' % mix.uid)
filename = "%s/mixes/%s.mp3" % (here(settings.MEDIA_ROOT), mix.uid)
logger.debug('Serving file: %s' % filename)
response = sendfile(request, filename)
return response
except Exception, ex:
print ex
raise Http404("Mix not found")
|
fergalmoran/dss
|
spa/audio.py
|
Python
|
bsd-2-clause
| 2,586
|
from weblab.core.new_server import WebLabAPI
weblab_api = WebLabAPI(['login_web', 'web', 'webclient'])
import webclient.web.view_index
import webclient.web.view_labs
import webclient.web.view_lab
@weblab_api.route_webclient("/test/", methods=["GET", "POST"])
def test():
weblab_api.ctx.reservation_id = "TEST"
return "HAI"
|
zstars/weblabdeusto
|
server/src/weblab/core/wl.py
|
Python
|
bsd-2-clause
| 338
|
"""
Functions for querying the Solr server.
results = query_solr( db, { 'q': 'obama' } )
sentences = results['response']['docs']
for sentence in sentences:
print(f"Found sentence ID: {sentence['story_sentences_id']}")
More information about Solr integration at docs/solr.markdown.
"""
import abc
import copy
import re
from typing import Union, List, Dict, Any, Optional
from mediawords.db import DatabaseHandler
from mediawords.solr.params import SolrParams
from mediawords.solr.request import solr_request
from mediawords.util.log import create_logger
from mediawords.util.parse_json import decode_json
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
class _AbstractSolrException(Exception, metaclass=abc.ABCMeta):
"""Abstract .solr exception."""
pass
class _AbstractSolrInternalErrorException(_AbstractSolrException):
"""Internal code error (most likely a bug in the code)."""
pass
class McUppercaseBooleanOperatorsInvalidTypeException(_AbstractSolrInternalErrorException):
"""Exception thrown when weird stuff gets passed to _uppercase_boolean_operators()."""
pass
class McQuerySolrInternalErrorException(_AbstractSolrInternalErrorException):
"""Exception thrown when query_solr() receives something that it didn't expect."""
pass
class _AbstractSolrInvalidQueryException(_AbstractSolrException):
"""Invalid Solr query."""
pass
class McInsertCollectionMediaIDsInvalidQueryException(_AbstractSolrInvalidQueryException):
"""Invalid Solr query encountered in _insert_collection_media_ids()."""
pass
class McQuerySolrInvalidQueryException(_AbstractSolrInvalidQueryException):
"""Invalid Solr query in query_solr()."""
pass
class McQuerySolrRangeQueryException(_AbstractSolrInvalidQueryException):
"""Exception thrown on range queries which are not allowed."""
pass
def _uppercase_boolean_operators(query: Union[List[str], str]) -> Union[List[str], str]:
"""
Convert any "and", "or", or "not" operations in the argument to uppercase.
If the argument is a list, call ourselves on all elements of the list.
"""
query = decode_object_from_bytes_if_needed(query)
if not query:
return query
def upper_repl(match) -> str:
return match.group(1).upper()
if isinstance(query, list):
query = [_uppercase_boolean_operators(_) for _ in query]
elif isinstance(query, str):
query = re.sub(r'\b(and|or|not)\b', upper_repl, query)
else:
raise McUppercaseBooleanOperatorsInvalidTypeException(f"Invalid query type: {query}")
return query
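# Usage sketch (module-internal helper): lowercase operators become uppercase
# so Solr treats them as boolean operators, and lists are handled recursively:
#
#   _uppercase_boolean_operators('obama and (clinton or sanders)')
#   # -> 'obama AND (clinton OR sanders)'
#   _uppercase_boolean_operators(['random and query', 'not this'])
#   # -> ['random AND query', 'NOT this']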
def _insert_collection_media_ids(db: DatabaseHandler, q: str) -> str:
"""
Transform any "tags_id_media:" or "collections_id:" clauses into "media_id:" clauses with the "media_ids" that
corresponds to the given tags.
"""
q = decode_object_from_bytes_if_needed(q)
def get_media_ids_clause(match) -> str:
"""Given the argument of "tags_id_media:" or "collections_id:" clause, return the corresponding "media_ids"."""
arg = match.group(2)
tags_ids = []
if re.search(r'^\d+', arg):
tags_ids.append(arg)
else:
parens_match = re.search(r'^\((.*)\)$', arg)
if parens_match:
parens = parens_match.group(1)
parens = re.sub(r'or', ' ', parens, flags=re.IGNORECASE)
parens = parens.strip()
if re.search(r'[^\d\s]', parens):
raise McInsertCollectionMediaIDsInvalidQueryException((
f'Only "or" clauses allowed inside "tags_id_media:" or "collections_id:" clauses: {parens}; '
f'full match: {arg}'
))
for tags_id in re.split(r'\s+', parens):
tags_ids.append(tags_id)
elif re.search(r'^\[', arg):
raise McQuerySolrRangeQueryException(
'Range queries not allowed for "tags_id_media:" or "collections_id:" clauses'
)
else:
raise McInsertCollectionMediaIDsInvalidQueryException(
f'Unrecognized format of "tags_id_media:" or "collections_id:" clause: {arg}'
)
media_ids = db.query("""
SELECT media_id
FROM media_tags_map
WHERE tags_id IN %(tags_ids)s
ORDER BY media_id
""", {'tags_ids': tuple(tags_ids)}).flat()
# Replace empty list with an id that will always return nothing from Solr
if not media_ids:
media_ids = [-1]
media_clause = f"media_id:({' '.join([str(_) for _ in media_ids])})"
return media_clause
if not q:
return q
q = re.sub(r'(tags_id_media|collections_id):(\d+|\([^)]*\)|\[[^\]]*\])', get_media_ids_clause, q)
return q
def _replace_smart_quotes(query: Union[List[str], str]) -> Union[List[str], str]:
"""
Replace smart quotes with straight versions so that solr will treat them correctly.
"""
    if query is None:
        return None
    elif isinstance(query, list):
        return [_replace_smart_quotes(_) for _ in query]
    elif isinstance(query, str):
        return query.replace(u"\u201c", '"').replace(u"\u201d", '"')
    else:
        # Pass any other type through unchanged rather than silently
        # returning None
        return query
def query_solr(db: DatabaseHandler, params: SolrParams) -> Dict[str, Any]:
"""
    Execute a query on the Solr server using the given parameters; the number of returned rows is capped below.
The "params" argument is a dictionary of query parameters to Solr, detailed here:
https://lucene.apache.org/solr/guide/6_6/common-query-parameters.html.
The query ("params['q']") is transformed: lower case boolean operators are made uppercase to make Solr recognize
them as boolean queries.
Return decoded response in the format described here:
https://lucene.apache.org/solr/guide/6_6/response-writers.html#ResponseWriters-JSONResponseWriter
"""
params = decode_object_from_bytes_if_needed(params)
# Avoid editing the dictionary itself
params = copy.deepcopy(params)
if not params:
raise McQuerySolrInternalErrorException('Parameters must be set.')
if not isinstance(params, dict):
raise McQuerySolrInternalErrorException('Parameters must be a dictionary.')
params['wt'] = 'json'
if 'rows' in params:
params['rows'] = int(params['rows'])
else:
params['rows'] = 1000
if 'df' not in params:
params['df'] = 'text'
params['rows'] = min(params['rows'], 10_000_000)
if 'q' not in params:
params['q'] = ''
# "fq" might be nonexistent or None
if not params.get('fq', None):
params['fq'] = []
if not isinstance(params['fq'], list):
params['fq'] = [params['fq']]
if ':[' in params['q']:
raise McQuerySolrRangeQueryException(
"Range queries are not allowed in the main query. Please use a filter query instead for range queries."
)
# if params['q']:
# params['q'] = f"{{!complexphrase inOrder=false}} {params['q']}"
params['q'] = _uppercase_boolean_operators(params['q'])
params['fq'] = _uppercase_boolean_operators(params['fq'])
params['q'] = _replace_smart_quotes(params['q'])
params['fq'] = _replace_smart_quotes(params['fq'])
if params['q']:
params['q'] = _insert_collection_media_ids(db=db, q=params['q'])
if params['fq']:
params['fq'] = [_insert_collection_media_ids(db=db, q=_) for _ in params['fq']]
response_json = solr_request(
path='select',
params={},
content=params,
content_type='application/x-www-form-urlencoded; charset=utf-8',
)
try:
response = decode_json(response_json)
except Exception as ex:
raise McQuerySolrInternalErrorException(f"Error parsing Solr JSON: {ex}\nJSON: {response_json}")
if 'error' in response:
raise McQuerySolrInvalidQueryException(f"Error received from Solr: {response_json}")
return response
def _get_intersection_of_lists(lists: List[List[int]]) -> List[int]:
"""Given a list of lists, each of which points to a list of IDs, return an intersection between them."""
lists = decode_object_from_bytes_if_needed(lists)
if not lists:
log.error("Lists are empty")
return []
intersection = set(lists[0])
for cur_list in lists[1:]:
intersection = intersection.intersection(set(cur_list))
return sorted(list(intersection))
def _get_stories_ids_from_stories_only_q(q: str) -> Optional[List[int]]:
"""
    Transform the pseudoquery fields in the query and then run a simple pattern to detect queries that consist of one
or more AND'ed "stories_id:..." clauses.
For those cases, just return the story IDs list rather than running it through Solr.
Return None if the query does not match.
"""
q = decode_object_from_bytes_if_needed(q)
if not q:
return None
q = re.sub(r'^\s*\(\s*(?P<inside_parens>.*)\s*\)\s*$', r'\g<inside_parens>', q)
q = q.strip()
if 'and' in q.lower():
p = q.lower().index('and')
else:
p = -1
if p > 0:
q_a = q[:p - 1]
a_stories_ids = _get_stories_ids_from_stories_only_q(q=q_a)
if a_stories_ids is None:
return None
q_b = q[p + 4:]
b_stories_ids = _get_stories_ids_from_stories_only_q(q=q_b)
if b_stories_ids is None:
return None
r = _get_intersection_of_lists(lists=[a_stories_ids, b_stories_ids])
return r
story_match = re.search(r'^stories_id:(\d+)$', q)
if story_match:
stories_id = int(story_match.group(1))
r = [stories_id]
return r
if re.search(r'^stories_id:\([\s\d]+\)$', q):
stories_ids = []
for story_match in re.findall(r'(\d+)', q):
stories_id = int(story_match)
stories_ids.append(stories_id)
return stories_ids
return None
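# Usage sketch: a query made up solely of AND'ed "stories_id:..." clauses is
# resolved locally; anything else falls through to None and goes to Solr:
#
#   _get_stories_ids_from_stories_only_q('stories_id:5 and stories_id:(4 5 6)')
#   # -> [5]
#   _get_stories_ids_from_stories_only_q('media_id:1')
#   # -> None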
def _get_stories_ids_from_stories_only_params(params: SolrParams) -> Optional[List[int]]:
"""
Transform the pseudoquery fields in the "q" and "fq" params and then run a simple pattern to detect queries that
    consist of one or more AND'ed "stories_id:..." clauses in the "q" param and all "fq" params.
Return None if either the "q" or any of the "fq" params do not match.
"""
params = decode_object_from_bytes_if_needed(params)
# Avoid editing the dictionary itself
params = copy.deepcopy(params)
q = params.get('q', '')
fqs = params.get('fq', [])
start = params.get('start', None)
rows = params.get('rows', None)
if start is not None:
start = int(start)
if rows is not None:
rows = int(rows)
# Return None if there are any unrecognized params
param_keys = set(list(params.keys()))
allowed_params = {'q', 'fq', 'start', 'rows'}
if not param_keys.issubset(allowed_params):
log.warning(f"Parameters have unrecognized keys: {param_keys - allowed_params}; all keys: {param_keys}")
return None
if not q:
log.error("'q' is unset.")
return None
stories_ids_lists = []
if fqs:
if not isinstance(fqs, list):
fqs = [fqs]
for fq in fqs:
stories_ids = _get_stories_ids_from_stories_only_q(q=fq)
if stories_ids is None:
return None
else:
stories_ids_lists.append(stories_ids)
# If there are stories_ids only "fqs" and a '*:*' "q", just use the "fqs"
if stories_ids_lists and q == '*:*':
r = _get_intersection_of_lists(lists=stories_ids_lists)
# If there were no "fqs" and a '*:*' "q", return None
elif q == '*:*':
return None
# Otherwise, combine "q" and "fqs"
else:
stories_ids = _get_stories_ids_from_stories_only_q(q=q)
if stories_ids is None:
return None
r = _get_intersection_of_lists(lists=[stories_ids] + stories_ids_lists)
if start is not None:
r = r[start:]
if rows is not None:
r = r[:rows]
return r
def get_solr_num_found(db: DatabaseHandler, params: SolrParams) -> int:
"""Execute the query and return only the number of documents found."""
params = decode_object_from_bytes_if_needed(params)
# Avoid editing the dictionary itself
params = copy.deepcopy(params)
params['rows'] = 0
params['json.facet'] = '{x:"hll(stories_id)"}'
res = query_solr(db=db, params=params)
# if count is 0, there is no value for x
num_found = res['facets']['x'] if res['facets']['count'] > 0 else 0
return num_found
def search_solr_for_stories_ids(db: DatabaseHandler, params: SolrParams) -> List[int]:
"""
    Return a list of all of the "stories_id" values that match the Solr query,
    using Solr-side grouping on the "stories_id" field.
"""
params = decode_object_from_bytes_if_needed(params)
# Avoid editing the dictionary itself
params = copy.deepcopy(params)
stories_ids = _get_stories_ids_from_stories_only_params(params)
if stories_ids:
return stories_ids
params['fl'] = 'stories_id'
response = query_solr(db=db, params=params)
stories_ids = [_['stories_id'] for _ in response['response']['docs']]
return stories_ids
def search_solr_for_processed_stories_ids(db: DatabaseHandler,
q: str,
fq: Optional[Union[str, List[str]]],
last_ps_id: int,
num_stories: int,
sort_by_random: bool = False) -> List[int]:
"""
Return the first "num_stories" "processed_stories_id" that match the given query, sorted by "processed_stories_id"
and with "processed_stories_id" greater than "last_ps_id".
Returns at most "num_stories" stories.
If "sort_by_random" is True, tell Solr to sort results by random order.
"""
q = decode_object_from_bytes_if_needed(q)
fq = decode_object_from_bytes_if_needed(fq)
if isinstance(last_ps_id, bytes):
last_ps_id = decode_object_from_bytes_if_needed(last_ps_id)
if isinstance(num_stories, bytes):
num_stories = decode_object_from_bytes_if_needed(num_stories)
if isinstance(sort_by_random, bytes):
sort_by_random = decode_object_from_bytes_if_needed(sort_by_random)
last_ps_id = int(last_ps_id)
num_stories = int(num_stories)
sort_by_random = bool(int(sort_by_random))
if not num_stories:
return []
if fq:
if not isinstance(fq, list):
fq = [fq]
else:
fq = []
if last_ps_id:
min_ps_id = last_ps_id + 1
fq.append(f"processed_stories_id:[{min_ps_id} TO *]")
params = {
'q': q,
'fq': fq,
'fl': 'processed_stories_id',
'rows': num_stories,
'sort': 'random_1 asc' if sort_by_random else 'processed_stories_id asc',
}
response = query_solr(db=db, params=params)
ps_ids = [_['processed_stories_id'] for _ in response['response']['docs']]
return ps_ids
def search_solr_for_media_ids(db: DatabaseHandler, params: SolrParams) -> List[int]:
"""Return all of the media IDs that match the Solr query."""
params = decode_object_from_bytes_if_needed(params)
# Avoid editing the dictionary itself
params = copy.deepcopy(params)
params['fl'] = 'media_id'
params['facet'] = 'true'
params['facet.limit'] = 1_000_000
params['facet.field'] = 'media_id'
params['facet.mincount'] = 1
params['rows'] = 0
response = query_solr(db=db, params=params)
counts = response['facet_counts']['facet_fields']['media_id']
    # Solr returns facet_fields as a flat list alternating between facet value
    # and count: [media_id1, count1, media_id2, count2, ...]; every second
    # element is a media ID.
    media_ids = counts[::2]
return media_ids
|
berkmancenter/mediacloud
|
apps/common/src/python/mediawords/solr/__init__.py
|
Python
|
agpl-3.0
| 16,005
|
from django import forms
from django.contrib.auth.models import User
from django.core.paginator import Paginator
from django.shortcuts import render
from django.utils.translation import ugettext as _
from wagtail.admin import messages
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.rich_text import get_rich_text_editor_widget
from wagtail.admin.widgets import (
AdminAutoHeightTextInput, AdminDateInput, AdminDateTimeInput, AdminPageChooser, AdminTimeInput)
from wagtail.core.models import Page
from wagtail.documents.widgets import AdminDocumentChooser
from wagtail.images.widgets import AdminImageChooser
from wagtail.snippets.widgets import AdminSnippetChooser
class ExampleForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['page_chooser'].widget = AdminPageChooser()
self.fields['image_chooser'].widget = AdminImageChooser()
self.fields['document_chooser'].widget = AdminDocumentChooser()
self.fields['snippet_chooser'].widget = AdminSnippetChooser(Page)
self.fields['date'].widget = AdminDateInput()
self.fields['time'].widget = AdminTimeInput()
self.fields['datetime'].widget = AdminDateTimeInput()
self.fields['auto_height_text'].widget = AdminAutoHeightTextInput()
self.fields['default_rich_text'].widget = get_rich_text_editor_widget('default')
CHOICES = (
('choice1', 'choice 1'),
('choice2', 'choice 2'),
)
text = forms.CharField(required=True, help_text="help text")
auto_height_text = forms.CharField(required=True)
default_rich_text = forms.CharField(required=True)
url = forms.URLField(required=True)
email = forms.EmailField(max_length=254)
date = forms.DateField()
time = forms.TimeField()
datetime = forms.DateTimeField()
select = forms.ChoiceField(choices=CHOICES)
radio_select = forms.ChoiceField(choices=CHOICES, widget=forms.RadioSelect)
boolean = forms.BooleanField(required=False)
page_chooser = forms.BooleanField(required=True)
image_chooser = forms.BooleanField(required=True)
document_chooser = forms.BooleanField(required=True)
snippet_chooser = forms.BooleanField(required=True)
def index(request):
form = SearchForm(placeholder=_("Search something"))
example_form = ExampleForm()
messages.success(request, _("Success message"), buttons=[
messages.button('', _('View live')),
messages.button('', _('Edit'))
])
messages.warning(request, _("Warning message"), buttons=[
messages.button('', _('View live')),
messages.button('', _('Edit'))
])
messages.error(request, _("Error message"), buttons=[
messages.button('', _('View live')),
messages.button('', _('Edit'))
])
paginator = Paginator(list(range(100)), 10)
page = paginator.page(2)
user = User(email='david@torchbox.com')
return render(request, 'wagtailstyleguide/base.html', {
'search_form': form,
'example_form': example_form,
'example_page': page,
'user': user,
})
|
mikedingjan/wagtail
|
wagtail/contrib/styleguide/views.py
|
Python
|
bsd-3-clause
| 3,140
|
import os, sys
import datetime
import iris
import iris.unit as unit
import iris.analysis.cartography
import numpy as np
import iris.analysis.geometry
from shapely.geometry import Polygon
from iris.coord_categorisation import add_categorised_coord
import imp
imp.load_source('UnrotateUpdateCube', '/nfs/see-fs-01_users/eepdw/python_scripts/Monsoon_Python_Scripts/modules/unrotate_and_update_pole.py')
from UnrotateUpdateCube import *
import pdb
diag = 'divergence_925.0'
pp_file_path='/nfs/a90/eepdw/Data/EMBRACE/'
experiment_ids = ['djzny', 'djznw', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
experiment_ids = ['dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq'] # Subset of 5 runs
#experiment_ids = ['dklyu']
# Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet
latmin=-6.79
latmax=33.038
lonmin=340
lonmax=379.98
lonmin_g=64.115
lonmax_g=101.866
lat_constraint=iris.Constraint(latitude= lambda la: latmin <= la.point <= latmax)
grid_lat_constraint=iris.Constraint(grid_latitude= lambda la: latmin <= la.point <= latmax)
polygon = Polygon(((73., 21.), (83., 16.), (87., 22.), (75., 27.)))
#experiment_ids = ['dkhgu']
#experiment_ids = ['djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ]
#experiment_ids = [ 'dklwu', 'dklzq', 'dklyu', 'dkmbq', 'dkbhu', 'djznu', 'dkhgu', 'djzns' ]
#experiment_ids = ['djznu', 'dkhgu' ] # High Res
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkjxq']
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkmbq', 'dklzq', 'dkjxq' ] # Params
# Load global LAM
# dtmindt = datetime.datetime(2011,8,19,0,0,0)
# dtmaxdt = datetime.datetime(2011,9,7,23,0,0)
# dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
# dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
# time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax)
# Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet
for experiment_id in experiment_ids:
lon_constraint=iris.Constraint(longitude= lambda lo: lonmin_g <= lo.point <= lonmax_g)
grid_lon_constraint=iris.Constraint(grid_longitude= lambda lo: lonmin <= lo.point <= lonmax)
expmin1 = experiment_id[:-1]
fu = '%s%s/%s/%s.pp' % (pp_file_path, expmin1, experiment_id, diag)
flsm = '%s%s/%s/30.pp' % (pp_file_path, expmin1, experiment_id)
print experiment_id
sys.stdout.flush()
#cube_names = ['%s' % cube_name_param, '%s' % cube_name_explicit]
#pdb.set_trace()
cube = iris.load_cube(fu, lat_constraint & lon_constraint)
#cube.coord('grid_longitude').guess_bounds()
#cube.coord('grid_latitude').guess_bounds()
#cube= unrotate_pole_update_cube(cube)
cube.coord('longitude').guess_bounds()
cube.coord('latitude').guess_bounds()
sys.stdout.flush()
#pdb.set_trace()
# Calculate weights
l=iris.analysis.geometry.geometry_area_weights(cube, polygon) # Polygon weights
# Load land/sea mask
#lsm = iris.load_cube(flsm, ('land_binary_mask' ) & grid_lat_constraint & grid_lon_constraint)
#print lsm
sys.stdout.flush()
# For Sea and Land, mask area and calculate mean of each hour for sea/land and SAVE as numpy array
#for s in (['sea','land']):
#pdb.set_trace()
# if s=='sea':
# w=1-lsm.data
# if s=='land':
# w=lsm.data
#pdb.set_trace()
#w=np.repeat(w,cube.coord('time').points.shape[0]).reshape(cube.shape)
coords = ('latitude', 'longitude')
collapsed_cube = cube.collapsed(coords,
iris.analysis.MEAN,
weights=l)
np.savez('%s%s/%s/%s_TimeVar_np_domain_constrain' % (pp_file_path, expmin1, experiment_id, diag), \
data=collapsed_cube.data.data, time_coords=collapsed_cube.coord('time').points)
#del lsm
|
peterwilletts24/Python-Scripts
|
EMBRACE/Time_Variability_Divergence.py
|
Python
|
mit
| 4,042
|
"""Main classes to add caching features to ``requests.Session``
.. autosummary::
:nosignatures:
CachedSession
CacheMixin
.. Explicitly show inherited method docs on CachedSession instead of CachedMixin
.. autoclass:: requests_cache.session.CachedSession
:show-inheritance:
:inherited-members:
.. autoclass:: requests_cache.session.CacheMixin
"""
from contextlib import contextmanager
from logging import getLogger
from threading import RLock
from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional
from requests import PreparedRequest, Response
from requests import Session as OriginalSession
from requests.hooks import dispatch_hook
from urllib3 import filepost
from ._utils import get_valid_kwargs
from .backends import BackendSpecifier, init_backend
from .cache_control import CacheActions, ExpirationTime, get_expiration_seconds
from .models import AnyResponse, CachedResponse, set_response_defaults
__all__ = ['ALL_METHODS', 'CachedSession', 'CacheMixin']
ALL_METHODS = ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT', 'PATCH', 'DELETE']
FILTER_FN = Callable[[AnyResponse], bool]
logger = getLogger(__name__)
if TYPE_CHECKING:
MIXIN_BASE = OriginalSession
else:
MIXIN_BASE = object
class CacheMixin(MIXIN_BASE):
"""Mixin class that extends :py:class:`requests.Session` with caching features.
See :py:class:`.CachedSession` for usage details.
"""
def __init__(
self,
cache_name: str = 'http_cache',
backend: BackendSpecifier = None,
expire_after: ExpirationTime = -1,
urls_expire_after: Dict[str, ExpirationTime] = None,
cache_control: bool = False,
allowable_codes: Iterable[int] = (200,),
allowable_methods: Iterable[str] = ('GET', 'HEAD'),
filter_fn: FILTER_FN = None,
stale_if_error: bool = False,
**kwargs,
):
self.cache = init_backend(cache_name, backend, **kwargs)
self.allowable_codes = allowable_codes
self.allowable_methods = allowable_methods
self.expire_after = expire_after
self.urls_expire_after = urls_expire_after
self.cache_control = cache_control
self.filter_fn = filter_fn or (lambda r: True)
self.stale_if_error = stale_if_error or kwargs.pop('old_data_on_error', False)
self._disabled = False
self._lock = RLock()
# If the superclass is custom Session, pass along any valid kwargs
session_kwargs = get_valid_kwargs(super().__init__, kwargs)
super().__init__(**session_kwargs) # type: ignore
def request( # type: ignore # Note: An extra param (expire_after) is added here
self,
method: str,
url: str,
*args,
expire_after: ExpirationTime = None,
**kwargs,
) -> AnyResponse:
"""This method prepares and sends a request while automatically performing any necessary
caching operations. This will be called by any other method-specific ``requests`` functions
(get, post, etc.). This does not include prepared requests, which will still be cached via
``send()``.
See :py:meth:`requests.Session.request` for parameters. Additional parameters:
Args:
expire_after: Expiration time to set only for this request; see details below.
Overrides ``CachedSession.expire_after``. Accepts all the same values as
``CachedSession.expire_after``. Use ``-1`` to disable expiration.
Returns:
Either a new or cached response
**Order of operations:** For reference, a request will pass through the following methods:
1. :py:func:`requests.get`/:py:meth:`requests.Session.get` or other method-specific functions (optional)
2. :py:meth:`.CachedSession.request`
3. :py:meth:`requests.Session.request`
4. :py:meth:`.CachedSession.send`
5. :py:meth:`.BaseCache.get_response`
6. :py:meth:`requests.Session.send` (if not previously cached)
7. :py:meth:`.BaseCache.save_response` (if not previously cached)
"""
# If present, set per-request expiration as a request header, to be handled in send()
if expire_after is not None:
kwargs.setdefault('headers', {})
kwargs['headers']['Cache-Control'] = f'max-age={get_expiration_seconds(expire_after)}'
with patch_form_boundary(**kwargs):
return super().request(method, url, *args, **kwargs)
def send(
self, request: PreparedRequest, expire_after: ExpirationTime = None, **kwargs
) -> AnyResponse:
"""Send a prepared request, with caching. See :py:meth:`.request` for notes on behavior, and
see :py:meth:`requests.Session.send` for parameters. Additional parameters:
Args:
expire_after: Expiration time to set only for this request
"""
# Determine which actions to take based on request info and cache settings
cache_key = self.cache.create_key(request, **kwargs)
actions = CacheActions.from_request(
cache_key=cache_key,
request=request,
request_expire_after=expire_after,
session_expire_after=self.expire_after,
urls_expire_after=self.urls_expire_after,
cache_control=self.cache_control,
**kwargs,
)
# Attempt to fetch a cached response
cached_response: Optional[CachedResponse] = None
if not (self._disabled or actions.skip_read):
cached_response = self.cache.get_response(cache_key)
actions.update_from_cached_response(cached_response)
is_expired = getattr(cached_response, 'is_expired', False)
# If the response is expired or missing, or the cache is disabled, then fetch a new response
if cached_response is None:
response = self._send_and_cache(request, actions, **kwargs)
elif is_expired and self.stale_if_error:
response = self._resend_and_ignore(request, actions, cached_response, **kwargs)
elif is_expired:
response = self._resend(request, actions, cached_response, **kwargs)
else:
response = cached_response
# If the request has been filtered out and was previously cached, delete it
if not self.filter_fn(response):
logger.debug(f'Deleting filtered response for URL: {response.url}')
self.cache.delete(cache_key)
return response
# Dispatch any hooks here, because they are removed before pickling
return dispatch_hook('response', request.hooks, response, **kwargs)
def _is_cacheable(self, response: Response, actions: CacheActions) -> bool:
"""Perform all checks needed to determine if the given response should be saved to the cache"""
cache_criteria = {
'disabled cache': self._disabled,
'disabled method': str(response.request.method) not in self.allowable_methods,
'disabled status': response.status_code not in self.allowable_codes,
'disabled by filter': not self.filter_fn(response),
'disabled by headers or expiration params': actions.skip_write,
}
logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}')
return not any(cache_criteria.values())
def _send_and_cache(
self,
request: PreparedRequest,
actions: CacheActions,
cached_response: CachedResponse = None,
**kwargs,
) -> AnyResponse:
"""Send the request and cache the response, unless disabled by settings or headers.
If applicable, also add headers to make a conditional request. If we get a 304 Not Modified
response, return the stale cache item.
"""
request.headers.update(actions.validation_headers)
response = super().send(request, **kwargs)
actions.update_from_response(response)
if self._is_cacheable(response, actions):
self.cache.save_response(response, actions.cache_key, actions.expires)
elif cached_response and response.status_code == 304:
return self._update_revalidated_response(actions, response, cached_response)
else:
logger.debug(f'Skipping cache write for URL: {request.url}')
return set_response_defaults(response, actions.cache_key)
def _resend(
self,
request: PreparedRequest,
actions: CacheActions,
cached_response: CachedResponse,
**kwargs,
) -> AnyResponse:
"""Attempt to resend the request and cache the new response. If the request fails, delete
the stale cache item.
"""
logger.debug('Stale response; attempting to re-send request')
try:
return self._send_and_cache(request, actions, cached_response, **kwargs)
except Exception:
self.cache.delete(actions.cache_key)
raise
def _resend_and_ignore(
self,
request: PreparedRequest,
actions: CacheActions,
cached_response: CachedResponse,
**kwargs,
) -> AnyResponse:
"""Attempt to resend the request and cache the new response. If there are any errors, ignore
        them and return the stale cache item.
"""
# Attempt to send the request and cache the new response
logger.debug('Stale response; attempting to re-send request')
try:
response = self._send_and_cache(request, actions, cached_response, **kwargs)
response.raise_for_status()
return response
except Exception:
logger.warning(
f'Request for URL {request.url} failed; using cached response', exc_info=True
)
return cached_response
def _update_revalidated_response(
self, actions: CacheActions, response: Response, cached_response: CachedResponse
) -> CachedResponse:
"""After revalidation, update the cached response's headers and reset its expiration"""
logger.debug(
f'Response for URL {response.request.url} has not been modified; updating and using cached response'
)
cached_response.headers.update(response.headers)
actions.update_from_response(cached_response)
cached_response.expires = actions.expires
self.cache.save_response(cached_response, actions.cache_key, actions.expires)
return cached_response
@contextmanager
def cache_disabled(self):
"""
        Context manager for temporarily disabling the cache
.. warning:: This method is not thread-safe.
Example:
>>> s = CachedSession()
>>> with s.cache_disabled():
... s.get('http://httpbin.org/ip')
"""
if self._disabled:
yield
else:
self._disabled = True
try:
yield
finally:
self._disabled = False
def remove_expired_responses(self, expire_after: ExpirationTime = None):
"""Remove expired responses from the cache, optionally with revalidation
Args:
expire_after: A new expiration time used to revalidate the cache
"""
self.cache.remove_expired_responses(expire_after)
def __repr__(self):
repr_attrs = [
'cache',
'expire_after',
'urls_expire_after',
'allowable_codes',
'allowable_methods',
'stale_if_error',
'cache_control',
]
attr_strs = [f'{k}={repr(getattr(self, k))}' for k in repr_attrs]
return f'<CachedSession({", ".join(attr_strs)})>'
class CachedSession(CacheMixin, OriginalSession):
"""Session class that extends :py:class:`requests.Session` with caching features.
See individual :py:mod:`backend classes <requests_cache.backends>` for additional backend-specific arguments.
Also see :ref:`user-guide` for more details and examples on how the following arguments
affect cache behavior.
Args:
cache_name: Cache prefix or namespace, depending on backend
backend: Cache backend name or instance; name may be one of
``['sqlite', 'filesystem', 'mongodb', 'gridfs', 'redis', 'dynamodb', 'memory']``
serializer: Serializer name or instance; name may be one of
``['pickle', 'json', 'yaml', 'bson']``.
expire_after: Time after which cached items will expire
urls_expire_after: Expiration times to apply for different URL patterns
cache_control: Use Cache-Control headers to set expiration
allowable_codes: Only cache responses with one of these status codes
allowable_methods: Cache only responses for one of these HTTP methods
match_headers: Match request headers when reading from the cache; may be either a boolean
or a list of specific headers to match
ignored_parameters: List of request parameters to not match against, and exclude from the cache
filter_fn: Function that takes a :py:class:`~requests.Response` object and returns a boolean
indicating whether or not that response should be cached. Will be applied to both new
and previously cached responses.
key_fn: Function for generating custom cache keys based on request info
stale_if_error: Return stale cache data if a new request raises an exception
"""
@contextmanager
def patch_form_boundary(**request_kwargs):
"""If the ``files`` param is present, patch the form boundary used to separate multipart
uploads. ``requests`` does not provide a way to pass a custom boundary to urllib3, so this just
monkey-patches it instead.
"""
if request_kwargs.get('files'):
original_boundary = filepost.choose_boundary
filepost.choose_boundary = lambda: '##requests-cache-form-boundary##'
yield
filepost.choose_boundary = original_boundary
else:
yield
|
reclosedev/requests-cache
|
requests_cache/session.py
|
Python
|
bsd-2-clause
| 14,100
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Documentation',
'category': 'Website',
'summary': 'Website, Documentation',
'version': '8.0.1.3.0',
'description': """
Documentation Using Website, pages and google docs
To create a page you can type: http://localhost:9069/page/asda
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'depends': [
'website',
],
'data': [
'data/doc_data.xml',
'security/ir.model.access.csv',
'security/security.xml',
'views/doc.xml',
'views/website_doc.xml',
],
'demo': [
],
'qweb': [
'static/src/xml/website_doc.xml'
],
'installable': True,
'application': True,
}
|
jobiols/odoo-web
|
website_doc/__openerp__.py
|
Python
|
agpl-3.0
| 1,690
|
"""Tests for Order-transaction reconciliation and reporting."""
from ddt import ddt, data, unpack
from edx.analytics.tasks.tests import unittest
from edx.analytics.tasks.tests.map_reduce_mixins import MapperTestMixin, ReducerTestMixin
from edx.analytics.tasks.reports.reconcile import (
ReconcileOrdersAndTransactionsTask,
BaseOrderItemRecord,
OrderItemRecord,
BaseTransactionRecord,
TransactionRecord,
OrderTransactionRecord,
LOW_ORDER_ID_SHOPPINGCART_ORDERS,
)
TEST_DATE = '2015-06-01'
TEST_LATER_DATE = '2015-06-10'
DEFAULT_REF_ID = "EDX-12345"
HIVE_NULL = '\\N'
FIRST_ORDER_ITEM = '2345678'
SECOND_ORDER_ITEM = '2345679'
FIRST_TRANSACTION = '123423453456'
SECOND_TRANSACTION = '212342345345'
THIRD_TRANSACTION = '312342345345'
class ReconciliationTaskMixin(object):
"""Mixin class to provide constructors for input data."""
def create_orderitem(self, is_refunded=False, **kwargs):
"""Create an OrderItemRecord with default values."""
params = {
'order_processor': 'otto', # "shoppingcart" or "otto"
'user_id': '12345',
'order_id': '1234567',
'line_item_id': FIRST_ORDER_ITEM,
'line_item_product_id': '1', # for "shoppingcart", this is the kind of orderitem table.
'line_item_price': '50.00',
'line_item_unit_price': '50.00',
'line_item_quantity': '1',
'product_class': 'seat', # e.g. seat, donation
'course_id': 'edx/demo_course/2014', # Was called course_key
'product_detail': 'verified', # contains course mode
'username': 'testuser',
'user_email': 'test@example.com',
'date_placed': TEST_DATE,
'iso_currency_code': 'USD',
'status': 'purchased',
'refunded_amount': '0.0',
'refunded_quantity': '0',
'payment_ref_id': DEFAULT_REF_ID,
}
if is_refunded:
params.update(**{
'refunded_amount': '50.00',
'refunded_quantity': '1',
'status': 'refunded',
})
params.update(**kwargs)
return OrderItemRecord(**params)
def create_transaction(self, **kwargs):
"""Create a TransactionRecord with default values."""
params = {
'date': TEST_DATE,
'payment_gateway_id': 'cybersource',
'payment_gateway_account_id': 'edx_account',
'payment_ref_id': DEFAULT_REF_ID,
'iso_currency_code': 'USD',
'amount': '50.00',
'transaction_fee': None,
'transaction_type': 'sale',
'payment_method': 'credit_card',
'payment_method_type': 'visa',
'transaction_id': FIRST_TRANSACTION,
}
params.update(**kwargs)
return TransactionRecord(**params)
def create_refunding_transaction(self, **kwargs):
"""Add default refund values to a default transaction."""
params = {
'date': TEST_LATER_DATE,
'amount': '-50.00',
'transaction_type': 'refund',
'transaction_id': SECOND_TRANSACTION,
}
params.update(**kwargs)
return self.create_transaction(**params)
task_class = ReconcileOrdersAndTransactionsTask
@ddt
class ReconciliationTaskMapTest(ReconciliationTaskMixin, MapperTestMixin, unittest.TestCase):
"""Test financial order-transaction mapper"""
def _convert_record_to_line(self, record):
"""Convert a record, substituting HIVE_NULL for None, for input to mapper."""
return '\t'.join([str(v) if v is not None else HIVE_NULL for v in record])
def _convert_record_to_expected_output(self, record):
"""Convert a record, preserving None, for comparison with mapper output."""
return [str(v) if v is not None else None for v in record]
def test_bad_line(self):
line = 'arbitrary stuff'
with self.assertRaisesRegexp(ValueError, "unrecognized line"):
self.assert_no_map_output_for(line)
def test_default_order(self):
orderitem = self.create_orderitem()
line = self._convert_record_to_line(orderitem)
expected_key = DEFAULT_REF_ID
expected_value = ('OrderItemRecord', self._convert_record_to_expected_output(orderitem))
self.assert_single_map_output(line, expected_key, expected_value)
def test_default_transaction(self):
trans = self.create_transaction()
line = self._convert_record_to_line(trans)
expected_key = DEFAULT_REF_ID
expected_value = ('TransactionRecord', self._convert_record_to_expected_output(trans))
self.assert_single_map_output(line, expected_key, expected_value)
@data(
('product_detail', ''),
('refunded_amount', '0.0'),
('refunded_quantity', '0'),
)
@unpack
def test_orderitem_mapped_nulls(self, fieldname, expected_value):
expected_orderitem = self.create_orderitem(**{fieldname: expected_value})
params = self.create_orderitem()._asdict() # pylint: disable=no-member,protected-access
params[fieldname] = HIVE_NULL
input_orderitem = BaseOrderItemRecord(**params)
line = self._convert_record_to_line(input_orderitem)
expected_key = DEFAULT_REF_ID
expected_value = ('OrderItemRecord', self._convert_record_to_expected_output(expected_orderitem))
self.assert_single_map_output(line, expected_key, expected_value)
@data(
('transaction_fee', None),
)
@unpack
def test_transaction_mapped_nulls(self, fieldname, expected_value):
expected_transaction = self.create_transaction(**{fieldname: expected_value})
params = self.create_transaction()._asdict() # pylint: disable=no-member,protected-access
params[fieldname] = HIVE_NULL
input_transaction = BaseTransactionRecord(**params)
line = self._convert_record_to_line(input_transaction)
expected_key = DEFAULT_REF_ID
expected_value = ('TransactionRecord', self._convert_record_to_expected_output(expected_transaction))
self.assert_single_map_output(line, expected_key, expected_value)
@data(
('1000', 'EDX-101000'),
('1200', 'EDX-101200'),
('3211', 'EDX-103211'),
)
@unpack
def test_mapped_payment_ref_id(self, ref_id, expected_ref_id):
trans = self.create_transaction(payment_ref_id=ref_id)
line = self._convert_record_to_line(trans)
expected_key = expected_ref_id
expected_value = ('TransactionRecord', self._convert_record_to_expected_output(trans))
self.assert_single_map_output(line, expected_key, expected_value)
def test_mapped_payment_ref_id_exceptions(self):
for ref_id in LOW_ORDER_ID_SHOPPINGCART_ORDERS:
trans = self.create_transaction(payment_ref_id=ref_id)
line = self._convert_record_to_line(trans)
expected_key = ref_id
expected_value = ('TransactionRecord', self._convert_record_to_expected_output(trans))
self.assert_single_map_output(line, expected_key, expected_value)
@ddt
class ReconciliationTaskReducerTest(ReconciliationTaskMixin, ReducerTestMixin, unittest.TestCase):
"""Test financial order-transaction reducer"""
def _check_output(self, inputs, column_values, **extra_values):
"""Compare generated with expected output."""
# Namedtuple objects need to be converted to lists on input.
inputs = [(type(inputval).__name__, list(inputval)) for inputval in inputs]
output = self._get_reducer_output(inputs)
if not isinstance(column_values, list):
column_values = [column_values]
self.assertEquals(len(output), len(column_values), '{0} != {1}'.format(output, column_values))
def record_sort_key(record):
"""Sort function for records, by order then by transaction."""
return record.order_line_item_id, record.transaction_id
sorted_output = sorted([OrderTransactionRecord.from_job_output(output_tuple[0]) for output_tuple in output],
key=record_sort_key)
for record, expected_columns in zip(sorted_output, column_values):
# This reducer does the packing in a different way, converting to TSV *before*
# putting into the output. So unpack that here, and output as a dict,
# so that column names can be used instead of numbers.
output_dict = record._asdict() # pylint: disable=no-member,protected-access
expected_columns.update(**extra_values)
            for column_name, expected_value in expected_columns.iteritems():
                self.assertEquals(output_dict[column_name], expected_value)
def test_no_transaction(self):
inputs = [self.create_orderitem(), ]
self._check_output(inputs, {
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
'orderitem_audit_code': 'ERROR_NO_TRANSACTION',
'transaction_audit_code': 'NO_TRANSACTION',
'transaction_date': None,
'transaction_id': None,
'unique_transaction_id': None,
'transaction_payment_gateway_id': None,
'transaction_payment_gateway_account_id': None,
'transaction_type': None,
'transaction_payment_method': None,
'transaction_amount': None,
'transaction_iso_currency_code': None,
'transaction_fee': None,
'transaction_amount_per_item': None,
'transaction_fee_per_item': None,
})
def test_honor_order(self):
# The honor code is not actually important here, the zero price is.
# But this happens most commonly when people enroll on Otto for honor mode.
inputs = [self.create_orderitem(**{
'line_item_price': '0.0',
'line_item_unit_price': '0.0',
'product_detail': 'honor',
}), ]
self._check_output(inputs, {
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'NO_COST',
'transaction_audit_code': 'NO_TRANSACTION',
'transaction_id': None,
})
def test_refunded_honor_order(self):
# The honor code is not actually important here, the zero price is.
# But this happens most commonly when people enroll on Otto for honor mode.
inputs = [self.create_orderitem(**{
'line_item_price': '0.0',
'line_item_unit_price': '0.0',
'product_detail': 'honor',
'status': 'refunded',
'refunded_amount': '0.0',
'refunded_quantity': '1',
}), ]
self._check_output(inputs, {
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'NO_COST',
'transaction_audit_code': 'NO_TRANSACTION',
'transaction_id': None,
})
def test_white_label_order_no_transaction(self):
inputs = [self.create_orderitem(**{
'order_processor': 'shoppingcart',
'line_item_product_id': '2',
}), ]
self._check_output(inputs, {
'order_audit_code': 'ORDER_NOT_BALANCED',
'orderitem_audit_code': 'NO_TRANS_WHITE_LABEL',
'transaction_audit_code': 'NO_TRANSACTION',
'transaction_id': None,
})
def test_orderitems_from_different_orders(self):
inputs = [
self.create_orderitem(order_id='order1'),
self.create_orderitem(order_id='order2'),
]
with self.assertRaisesRegexp(Exception, 'different order_ids'):
self._check_output(inputs, {})
def test_no_order(self):
purchase = self.create_transaction()
self._check_output([purchase], {
'order_audit_code': 'ERROR_NO_ORDER_NONZERO_BALANCE',
'orderitem_audit_code': 'NO_ORDERITEM',
'transaction_audit_code': 'PURCHASE',
'order_id': None,
'unique_order_id': None,
'order_timestamp': None,
'order_line_item_id': None,
'unique_order_line_item_id': None,
'order_line_item_product_id': None,
'order_line_item_price': None,
'order_line_item_unit_price': None,
'order_line_item_quantity': None,
'order_refunded_amount': None,
'order_refunded_quantity': None,
'order_user_id': None,
'order_username': None,
'order_user_email': None,
'order_product_class': None,
'order_product_detail': None,
'order_course_id': None,
'order_org_id': None,
'order_processor': None,
})
def test_no_order_refunded(self):
purchase = self.create_transaction()
refund = self.create_refunding_transaction()
self._check_output([purchase, refund], [
{'transaction_audit_code': 'PURCHASE'},
{'transaction_audit_code': 'REFUND'},
], **{
'order_audit_code': 'NO_ORDER_ZERO_BALANCE',
'orderitem_audit_code': 'NO_ORDERITEM',
})
###################################
# Single ORDERITEM tests:
###################################
#### Single purchase ####
def test_normal_purchase(self):
orderitem = self.create_orderitem()
purchase = self.create_transaction()
self._check_output([orderitem, purchase], {
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_amount_per_item': '50.00',
'transaction_fee_per_item': None,
})
def test_purchase_with_fee(self):
orderitem = self.create_orderitem()
purchase = self.create_transaction(transaction_fee='5.00')
self._check_output([orderitem, purchase], {
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_amount_per_item': '50.00',
'transaction_fee_per_item': '5.00',
})
def test_white_label_purchase(self):
orderitem = self.create_orderitem(**{
'order_processor': 'shoppingcart',
'line_item_product_id': '2',
})
purchase = self.create_transaction()
self._check_output([orderitem, purchase], {
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'ERROR_WHITE_LABEL_PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
})
@data(
(False, '100.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_CHARGE'),
(False, '10.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_UNDER_CHARGE'),
(True, '100.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_CHARGE'),
(True, '50.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_REFUND_MISSING'),
(True, '10.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_UNDER_CHARGE'),
)
@unpack
def test_mischarge_purchase(self, is_refunded_order, amount, orderitem_status):
orderitem = self.create_orderitem(is_refunded=is_refunded_order)
purchase = self.create_transaction(amount=amount)
self._check_output([orderitem, purchase], {
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
'orderitem_audit_code': orderitem_status,
'transaction_audit_code': 'PURCHASE_MISCHARGE' if amount != '50.00' else 'PURCHASE_ONE',
})
@data(
(False, '-100.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(False, '-50.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(False, '-10.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(True, '-100.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(True, '-50.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(True, '-10.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_REFUND'),
)
@unpack
def test_refund_without_purchase(self, is_refunded_order, amount, orderitem_status):
orderitem = self.create_orderitem(is_refunded=is_refunded_order)
purchase = self.create_transaction(amount=amount)
self._check_output([orderitem, purchase], {
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
'orderitem_audit_code': orderitem_status,
'transaction_audit_code': 'REFUND_FIRST' if amount != '-50.00' else 'REFUND_NEVER_PURCHASED',
})
#### Two purchases ####
@data(
(False, '100.00', 'PURCHASE_FIRST', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_CHARGE'),
(False, '50.00', 'PURCHASE_AGAIN', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_CHARGED_TWICE'),
(False, '10.00', 'PURCHASE_FIRST', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_CHARGE'),
(True, '100.00', 'PURCHASE_FIRST', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_CHARGE'),
(True, '50.00', 'PURCHASE_AGAIN', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_WAS_CHARGED_TWICE'),
(True, '10.00', 'PURCHASE_FIRST', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_CHARGE'),
)
@unpack
def test_second_purchase(self, is_refunded_order, amount, transaction_status, orderitem_status):
orderitem = self.create_orderitem(is_refunded=is_refunded_order)
purchase1 = self.create_transaction()
purchase2 = self.create_transaction(amount=amount)
self._check_output([orderitem, purchase1, purchase2], [
{'transaction_audit_code': 'PURCHASE_ONE'},
{'transaction_audit_code': transaction_status},
], **{
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
'orderitem_audit_code': orderitem_status,
})
def test_two_transactions_to_purchase(self):
orderitem = self.create_orderitem()
purchase1 = self.create_transaction(amount='25.00')
purchase2 = self.create_transaction(amount='25.00', transaction_id=SECOND_TRANSACTION)
self._check_output([orderitem, purchase1, purchase2], [
{'transaction_audit_code': 'PURCHASE_MISCHARGE', 'transaction_id': FIRST_TRANSACTION},
{'transaction_audit_code': 'PURCHASE_FIRST', 'transaction_id': SECOND_TRANSACTION},
], **{
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
})
#### One purchase and one refund ####
def test_normal_refund(self):
orderitem = self.create_orderitem(is_refunded=True)
purchase = self.create_transaction()
refund = self.create_refunding_transaction()
self._check_output([orderitem, purchase, refund], [
{'transaction_audit_code': 'PURCHASE_ONE'},
{'transaction_audit_code': 'REFUND_ONE'},
], **{
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
})
@data(
(True, '-100.00', 'REFUND_FIRST', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(True, '-10.00', 'REFUND_FIRST', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_PARTIAL_REFUND'),
(False, '-100.00', 'REFUND_FIRST', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(False, '-10.00', 'REFUND_FIRST', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_PARTIAL_REFUND'),
(False, '-50.00', 'REFUND_ONE_STATUS_NOT_REFUNDED', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_REFUNDED'),
)
@unpack
def test_mischarge_refund(self, is_refunded_order, amount, transaction_status, orderitem_status):
orderitem = self.create_orderitem(is_refunded=is_refunded_order)
purchase = self.create_transaction()
refund = self.create_refunding_transaction(amount=amount)
self._check_output([orderitem, purchase, refund], [
{'transaction_audit_code': 'PURCHASE_ONE'},
{'transaction_audit_code': transaction_status},
], **{
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
'orderitem_audit_code': orderitem_status
})
#### One purchase and two refunds ####
@data(
(True, '-100.00', 'REFUND_FIRST', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(True, '-50.00', 'REFUND_AGAIN', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_WAS_REFUNDED_TWICE'),
(True, '-10.00', 'REFUND_FIRST', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(False, '-100.00', 'REFUND_FIRST', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_REFUND'),
(False, '-50.00', 'REFUND_AGAIN_STATUS_NOT_REFUNDED', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_REFUNDED_TWICE'),
(False, '-10.00', 'REFUND_FIRST', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_REFUND'),
)
@unpack
def test_extra_refund(self, is_refunded_order, amount, transaction_status, orderitem_status):
orderitem = self.create_orderitem(is_refunded=is_refunded_order)
purchase = self.create_transaction()
refund1 = self.create_refunding_transaction()
refund2 = self.create_refunding_transaction(amount=amount)
self._check_output([orderitem, purchase, refund1, refund2], [
{'transaction_audit_code': 'PURCHASE_ONE'},
{'transaction_audit_code': 'REFUND_ONE' if is_refunded_order else 'REFUND_ONE_STATUS_NOT_REFUNDED'},
{'transaction_audit_code': transaction_status},
], **{
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
'orderitem_audit_code': orderitem_status,
})
def test_double_refund_to_refund(self):
orderitem = self.create_orderitem(is_refunded=True)
purchase = self.create_transaction()
refund1 = self.create_refunding_transaction(amount='-30.00')
refund2 = self.create_refunding_transaction(amount='-20.00')
self._check_output([orderitem, purchase, refund1, refund2], [
{'transaction_audit_code': 'PURCHASE_ONE'},
{'transaction_audit_code': 'REFUND_FIRST'},
{'transaction_audit_code': 'REFUND_FIRST'},
], **{
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
})
###################################
# Multiple ORDERITEM tests:
###################################
@data(
(False, 'PURCHASED_BALANCE_MATCHING', 'ORDER_BALANCED'),
(True, 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_REFUND_MISSING', 'ERROR_ORDER_NOT_BALANCED'),
)
@unpack
def test_single_purchase_two_orderitems(self, is_refunded_order, orderitem_status, order_status):
orderitem1 = self.create_orderitem(is_refunded=is_refunded_order)
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='100.00')
self._check_output([orderitem1, orderitem2, purchase], [
{
'order_line_item_id': FIRST_ORDER_ITEM,
'orderitem_audit_code': orderitem_status,
'transaction_audit_code': 'PURCHASE_ONE',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
},
], **{
'order_audit_code': order_status,
})
@data(
(False, 'PURCHASED_BALANCE_MATCHING', 'ORDER_BALANCED'),
(True, 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_REFUND_MISSING', 'ERROR_ORDER_NOT_BALANCED'),
)
@unpack
def test_single_purchase_two_orderitems_with_fee(self, is_refunded_order, orderitem_status, order_status):
orderitem1 = self.create_orderitem(is_refunded=is_refunded_order)
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='100.00', transaction_fee='5.00')
self._check_output([orderitem1, orderitem2, purchase], [
{
'order_line_item_id': FIRST_ORDER_ITEM,
'orderitem_audit_code': orderitem_status,
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_amount_per_item': '50.00',
'transaction_fee': '5.00',
'transaction_fee_per_item': '2.50',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_amount_per_item': '50.00',
'transaction_fee': '5.00',
'transaction_fee_per_item': '2.50',
},
], **{
'order_audit_code': order_status,
})
def test_single_purchase_two_orderitems_with_rounded_fee(self):
orderitem1 = self.create_orderitem()
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='100.00', transaction_fee='1.01')
self._check_output([orderitem1, orderitem2, purchase], [
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_amount_per_item': '50.00',
'transaction_fee_per_item': '0.50',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_amount_per_item': '50.00',
'transaction_fee_per_item': '0.51',
},
])
@data(
(False, '130.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
(False, '50.00', 'PURCHASED_BALANCE_MATCHING', 'ERROR_ORDER_NOT_BALANCED'),
(False, '10.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_UNDER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
(True, '130.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
(True, '50.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_REFUND_MISSING', 'ORDER_BALANCED'),
(True, '10.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_UNDER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
)
@unpack
def test_purchase_of_one_out_of_two(self, is_refunded_order, amount, orderitem_status, order_status):
orderitem1 = self.create_orderitem(is_refunded=is_refunded_order)
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount=amount)
self._check_output([orderitem1, orderitem2, purchase], [
{
'order_line_item_id': FIRST_ORDER_ITEM,
'orderitem_audit_code': orderitem_status,
'transaction_audit_code': 'PURCHASE_MISCHARGE' if amount != '50.00' else 'PURCHASE_ONE',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'orderitem_audit_code': 'ERROR_NO_TRANSACTION',
'transaction_audit_code': 'NO_TRANSACTION',
},
], **{
'order_audit_code': order_status,
})
@data(
(False, '70.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_OVER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
(False, '50.00', 'PURCHASED_BALANCE_MATCHING', 'ERROR_ORDER_NOT_BALANCED'),
(False, '10.00', 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_UNDER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
(True, '70.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_OVER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
(True, '50.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_REFUND_MISSING', 'ERROR_ORDER_NOT_BALANCED'),
(True, '10.00', 'ERROR_REFUNDED_BALANCE_NOT_MATCHING_UNDER_CHARGE', 'ERROR_ORDER_NOT_BALANCED'),
)
@unpack
def test_purchase_one_out_of_two_unequal(self, is_refunded_order, amount, orderitem_status, order_status):
orderitem1 = self.create_orderitem(is_refunded=is_refunded_order)
orderitem2 = self.create_orderitem(**{
'line_item_id': SECOND_ORDER_ITEM,
'line_item_price': '30.00',
'line_item_unit_price': '30.00'
})
purchase = self.create_transaction(amount=amount)
self._check_output([orderitem1, orderitem2, purchase], [
{
'order_line_item_id': FIRST_ORDER_ITEM,
'orderitem_audit_code': orderitem_status,
'transaction_audit_code': 'PURCHASE_MISCHARGE' if amount not in ['50.00'] else 'PURCHASE_ONE',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'orderitem_audit_code': 'ERROR_NO_TRANSACTION',
'transaction_audit_code': 'NO_TRANSACTION',
},
], **{
'order_audit_code': order_status,
})
@data(
(False, 'ERROR_ORDER_NOT_BALANCED'),
(True, 'ORDER_BALANCED'),
)
@unpack
def test_purchase_matching_second_one_out_of_two_unequal(self, is_refunded_order, order_status):
orderitem1 = self.create_orderitem(is_refunded=is_refunded_order)
orderitem2 = self.create_orderitem(**{
'line_item_id': SECOND_ORDER_ITEM,
'line_item_price': '30.00',
'line_item_unit_price': '30.00'
})
purchase = self.create_transaction(amount='30.00')
self._check_output([orderitem1, orderitem2, purchase], [
{
'order_line_item_id': FIRST_ORDER_ITEM,
'orderitem_audit_code': 'ERROR_NO_TRANSACTION',
'transaction_audit_code': 'NO_TRANSACTION',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
},
], **{
'order_audit_code': order_status,
})
def test_two_orderitems_refund_one(self):
orderitem1 = self.create_orderitem(is_refunded=True)
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='100.00')
refund = self.create_refunding_transaction()
self._check_output([orderitem1, orderitem2, purchase, refund], [
# Output is in order by orderitem, then transaction.
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
},
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': SECOND_TRANSACTION,
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
'transaction_audit_code': 'REFUND_ONE',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
},
], **{
'order_audit_code': 'ORDER_BALANCED',
})
def test_two_orderitems_refund_one_with_fee(self):
orderitem1 = self.create_orderitem(
is_refunded=True,
line_item_price='100.00',
line_item_unit_price='100.00',
refunded_amount='100.00',
)
orderitem2 = self.create_orderitem(is_refunded=True, line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='150.00', transaction_fee='10.00')
refund = self.create_refunding_transaction(amount='-150.00', transaction_fee='-5.00')
self._check_output([orderitem1, orderitem2, purchase, refund], [
# Output is in order by orderitem, then transaction.
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_fee': '10.00',
'transaction_fee_per_item': '6.67',
},
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': SECOND_TRANSACTION,
'transaction_audit_code': 'REFUND_ONE',
'transaction_fee': '-5.00',
'transaction_fee_per_item': '-3.33',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'transaction_audit_code': 'PURCHASE_ONE',
'transaction_fee': '10.00',
'transaction_fee_per_item': '3.33',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_id': SECOND_TRANSACTION,
'transaction_audit_code': 'REFUND_ONE',
'transaction_fee': '-5.00',
'transaction_fee_per_item': '-1.67',
},
], **{
'order_audit_code': 'ORDER_BALANCED',
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
})
def test_two_orderitems_extra_purchase(self):
orderitem1 = self.create_orderitem()
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='100.00')
purchase2 = self.create_transaction(transaction_id=SECOND_TRANSACTION)
self._check_output([orderitem1, orderitem2, purchase, purchase2], [
# Output is in order by orderitem, then transaction.
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'orderitem_audit_code': 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_CHARGED_TWICE',
'transaction_audit_code': 'PURCHASE_ONE',
},
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': SECOND_TRANSACTION,
'orderitem_audit_code': 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_CHARGED_TWICE',
'transaction_audit_code': 'PURCHASE_AGAIN',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'orderitem_audit_code': 'PURCHASED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
},
], **{
'order_audit_code': 'ERROR_ORDER_NOT_BALANCED',
})
@data(
[0, 1, 2, 3, 4],
[4, 3, 2, 1, 0],
[0, 2, 4, 3, 1],
[2, 4, 1, 3, 0]
)
def test_two_orderitems_refund_both_wrongstatus(self, permutation):
orderitem1 = self.create_orderitem(is_refunded=True)
orderitem2 = self.create_orderitem(line_item_id=SECOND_ORDER_ITEM)
purchase = self.create_transaction(amount='100.00')
refund = self.create_refunding_transaction()
refund2 = self.create_refunding_transaction(transaction_id=THIRD_TRANSACTION)
inputs = [orderitem1, orderitem2, purchase, refund, refund2]
permuted_input = [inputs[index] for index in permutation]
self._check_output(permuted_input, [
# Output is in order by orderitem, then transaction.
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
'transaction_audit_code': 'PURCHASE_ONE',
},
{
'order_line_item_id': FIRST_ORDER_ITEM,
'transaction_id': SECOND_TRANSACTION,
'orderitem_audit_code': 'REFUNDED_BALANCE_MATCHING',
'transaction_audit_code': 'REFUND_ONE',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_id': FIRST_TRANSACTION,
'orderitem_audit_code': 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_REFUNDED',
'transaction_audit_code': 'PURCHASE_ONE',
},
{
'order_line_item_id': SECOND_ORDER_ITEM,
'transaction_id': THIRD_TRANSACTION,
'orderitem_audit_code': 'ERROR_PURCHASED_BALANCE_NOT_MATCHING_WAS_REFUNDED',
'transaction_audit_code': 'REFUND_ONE_STATUS_NOT_REFUNDED',
},
], **{'order_audit_code': 'ERROR_ORDER_NOT_BALANCED'}
)
|
sssllliang/edx-analytics-pipeline
|
edx/analytics/tasks/reports/tests/test_reconcile.py
|
Python
|
agpl-3.0
| 36,792
|
import time
import random
from random import randint
# from library import Trigger, Axis
# from library import PS4
from library import Joystick
import RPi.GPIO as GPIO # remove!!!
from emotions import angry, happy, confused
# from pysabertooth import Sabertooth
# from smc import SMC
from library import LEDDisplay
from library import factory
from library import reset_all_hw
# Leg Motor Speed Global
global_LegMotor = 70
# # Happy Emotion
# def happy(leds, servos, mc, audio):
# print("4")
# print("Happy")
#
# # Dome Motor Initialization
# # mc = SMC(dome_motor_port, 115200)
# # mc.init()
#
# # Spins Motor
# # mc.init()
# mc.speed(3200)
#
# # LED Matrix Green
# # breadboard has mono
# # R2 has bi-color leds
# # mono:0 bi:1
# # led_type = 0
# # leds = [0]*5
# # leds[1] = LEDDisplay(0x70, led_type)
# # leds[2] = LEDDisplay(0x71, led_type)
# # leds[3] = LEDDisplay(0x72, led_type)
# # leds[4] = LEDDisplay(0x73, led_type)
#
# for x in [0, 1, 2, 3, 4, 5, 6, 7]:
# for y in [0, 1, 2, 3, 4, 5, 6, 7]:
# for i in range(1, 5):
# leds[i].set(x, y, 1)
#
# for i in range(1, 5):
# leds[i].write()
#
# # Servo Wave
# # s0.angle = 0
# # time.sleep(0.2)
# # s1.angle = 0
# # time.sleep(0.2)
# # s2.angle = 0
# # time.sleep(0.2)
# # s3.angle = 0
# # time.sleep(0.2)
# # s4.angle = 0
# # time.sleep(0.5)
# # s4.angle = 130
# # time.sleep(0.2)
# # s3.angle = 130
# # time.sleep(0.2)
# # s2.angle = 130
# # time.sleep(0.2)
# # s1.angle = 130
# # time.sleep(0.2)
# # s0.angle = 130
#
# for a in [0, 130]:
# for i in range(4):
# servos[i].angle = a
# time.sleep(0.2)
# time.sleep(0.5)
#
# time.sleep(1.5)
# mc.stop()
# time.sleep(1.5)
# for i in range(1, 5):
# leds[i].clear()
#
#
# # Confused Emotion
# def confused(leds, servos, mc, audio):
# print("5")
# print("Confused")
# # LED Matrix Yellow
# # leds = [0]*5
# # leds[1] = LEDDisplay(0x70, 1)
# # leds[2] = LEDDisplay(0x71, 1)
# # leds[3] = LEDDisplay(0x72, 1)
# # leds[4] = LEDDisplay(0x73, 1)
#
# for x in [0, 1, 2, 3, 4, 5, 6, 7]:
# for y in [0, 1, 2, 3, 4, 5, 6, 7]:
# for i in range(1, 5):
# leds[i].set(x, y, 3)
# for i in range(1, 5):
# leds[i].write()
# time.sleep(3)
# for i in range(1, 5):
# leds[i].clear()
#
#
# # Angry Emotion
# def angry(leds, servos, mc, audio):
# print("6")
# print("Angry")
# # LED Matrix Red
# # leds = [0]*5
# # leds[1] = LEDDisplay(0x70, 1)
# # leds[2] = LEDDisplay(0x71, 1)
# # leds[3] = LEDDisplay(0x72, 1)
# # leds[4] = LEDDisplay(0x73, 1)
#
# for x in [0, 1, 2, 3, 4, 5, 6, 7]:
# for y in [0, 1, 2, 3, 4, 5, 6, 7]:
# for i in range(1, 5):
# leds[i].set(x, y, 2)
#
# for i in range(1, 5):
# leds[i].write()
#
# # Plays Imperial Theme Sound
# audio.sound('imperial')
#
# # Servo Open and Close
# # s0.angle = 0
# # s1.angle = 0
# # s2.angle = 0
# # s3.angle = 0
# # s4.angle = 0
# # time.sleep(1)
# # s4.angle = 130
# # s3.angle = 130
# # s2.angle = 130
# # s1.angle = 130
# # s0.angle = 130
#
# for a in [0, 130]:
# for i in range(5):
# servos[i].angle = a
# time.sleep(1)
#
# time.sleep(3)
# for i in range(1, 5):
# leds[i].clear()
#######################################
# original remote
#######################################
# # Remote Mode
# def remote(remoteflag, namespace):
# print("Remote")
#
# # create objects
# (leds, dome, legs, servos, Flash) = factory(['leds', 'dome', 'legs', 'servos', 'flashlight'])
#
# # initalize everything
# dome.init()
# dome.speed(0)
#
# legs.drive(1, 0)
# legs.drive(2, 0)
#
# for s in servos:
# s.angle = 0
# time.sleep(0.25)
#
# # what is this???
# GPIO.setmode(GPIO.BCM)
# GPIO.setwarnings(False)
# GPIO.setup(26, GPIO.OUT)
#
# # Joystick Initialization
# js = Joystick()
#
# # get audio
# audio = namespace.audio
#
# # Flash = FlashlightPWM(15)
# # Flash = namespace.flashlight
#
# while(remoteflag.is_set()):
# try:
# # Button Initialization
# ps4 = js.get()
# btnSquare = ps4.buttons[0]
# btnTriangle = ps4.buttons[1]
# btnCircle = ps4.buttons[2]
# btnX = ps4.buttons[3]
# btnLeftStickLeftRight = ps4.leftStick.y
# btnLeftStickUpDown = ps4.leftStick.x
# btnRightStickLeftRight = ps4.rightStick.y
# btnRightStickUpDown = ps4.rightStick.x
# Left1 = ps4.shoulder[0]
# Right1 = ps4.shoulder[1]
# Left2 = ps4.triggers.x
# Right2 = ps4.triggers.y
# hat = ps4.hat
#
# # print("PRINT")
#
# # Button Controls
# if hat == 1:
# # Happy Emotion
# print("Arrow Up Pressed")
# happy(leds, servos, dome, audio) # namespace.emotions['happy'](leds, servos, mc, audio)
# if hat == 8:
# # Confused Emotion
# print("Arrow Left Pressed")
# confused(leds, servos, dome, audio)
# if hat == 2:
# # Angry Emotion
# print("Arrow Right Pressed")
# angry(leds, servos, dome, audio)
# if hat == 4:
# print("Arrow Down Pressed")
# if btnSquare == 1:
# # word = random_char(2)
# audio.speak_random(2)
# time.sleep(0.5)
# if btnTriangle == 1:
# # FlashLight ON
# GPIO.output(26, GPIO.HIGH)
# Flash.pwm.set_pwm(15, 0, 130)
# if btnCircle == 1:
# # FlashLight OFF
# GPIO.output(26, GPIO.LOW)
# Flash.pwm.set_pwm(15, 0, 0)
# if btnX == 1:
# for x in [0, 1, 2, 3, 4, 5, 6, 7]:
# for y in [0, 1, 2, 3, 4, 5, 6, 7]:
# if x == randint(0, 8) or y == randint(0, 8):
# for i in range(1, 5):
# leds[i].set(x, y, randint(0, 4))
# else:
# for i in range(1, 5):
# leds[i].set(x, y, 4)
# for i in range(1, 5):
# leds[i].write()
# time.sleep(0.1)
# for i in range(1, 5):
# leds[i].clear()
# if Left1 == 1:
# # Dome Motor Forward
# dome.speed(3200)
# time.sleep(2)
# dome.speed(0)
# if Right1 == 1:
# # Dome Motor Backward
# dome.speed(-3200)
# time.sleep(2)
# dome.speed(0)
# # if Left1 == 0 or Right1 == 0:
# # # Dome Motor Stop
# # dome.speed(0)
# # if Left2 > 1:
# # # Servo Open
# # s0.angle = 0
# # s1.angle = 0
# # s2.angle = 0
# # s3.angle = 0
# # s4.angle = 0
# # Flash.pwm.set_pwm(15, 0, 3000)
# #
# # if Right2 > 1:
# # # Servo Close
# # s0.angle = 130
# # s1.angle = 130
# # s2.angle = 130
# # s3.angle = 130
# # s4.angle = 130
# # Flash.pwm.set_pwm(15, 0, 130)
# if Left2 > 1:
# for s in servos:
# s.angle = 0
# time.sleep(0.25)
# Flash.pwm.set_pwm(15, 0, 300)
# if Right2 > 1:
# for s in servos:
# s.angle = 130
# time.sleep(0.25)
# Flash.pwm.set_pwm(15, 0, 130)
# if btnLeftStickLeftRight < 0.3 and btnLeftStickLeftRight > -0.3:
# legs.drive(1, 0)
# if btnRightStickUpDown < 0.3 and btnRightStickUpDown > -0.3:
# legs.drive(2, 0)
# if btnRightStickUpDown >= 0.3:
# # Right and Left Motor Forward
# legs.drive(1, btnRightStickUpDown*global_LegMotor)
# legs.drive(2, btnRightStickUpDown*-global_LegMotor)
# if btnRightStickUpDown <= -0.3:
# # Right and Left Motor Backward
# legs.drive(1, btnRightStickUpDown*global_LegMotor)
# legs.drive(2, btnRightStickUpDown*-global_LegMotor)
# if btnLeftStickLeftRight <= 0.3:
# # Turn Left
# legs.drive(1, btnLeftStickLeftRight*(-global_LegMotor))
# legs.drive(2, btnLeftStickLeftRight*-global_LegMotor)
# if btnLeftStickLeftRight >= -0.3:
# # Turn Right
# legs.drive(1, btnLeftStickLeftRight*(-global_LegMotor))
# legs.drive(2, btnLeftStickLeftRight*-global_LegMotor)
#
# except KeyboardInterrupt:
# print('js exiting ...')
# return
# return
def remote_func(hw, ns):
print("Remote")
dome = hw['dome']
dome.speed(0)
legs = hw['legs']
legs.drive(1, 0)
legs.drive(2, 0)
    # leds and servos are also needed by the joystick loop further down
    leds = hw['leds']
    servos = hw['servos']
    flashlight = hw['flashlight']
audio = hw['audio']
audio.speak('start')
    # Placeholder demo: jitter the legs and dome at random speeds until the
    # state machine leaves state 3, then return. The joystick loop further
    # down is unreachable until this demo block is removed.
    while ns.current_state == 3:
print('remote ...')
spd = random.randint(0, 40)
legs.drive(1, spd)
legs.drive(2, spd)
dome.speed(spd)
time.sleep(0.5)
legs.drive(1, 0)
legs.drive(2, 0)
dome.speed(0)
time.sleep(0.1)
return
###### real joystick loop below (currently unreachable; see the demo note above) #####
# Joystick Initialization
js = Joystick()
while ns.current_state == 3:
try:
# Button Initialization
ps4 = js.get()
btnSquare = ps4.buttons[0]
btnTriangle = ps4.buttons[1]
btnCircle = ps4.buttons[2]
btnX = ps4.buttons[3]
btnLeftStickLeftRight = ps4.leftStick.y
btnLeftStickUpDown = ps4.leftStick.x
btnRightStickLeftRight = ps4.rightStick.y
btnRightStickUpDown = ps4.rightStick.x
Left1 = ps4.shoulder[0]
Right1 = ps4.shoulder[1]
Left2 = ps4.triggers.x
Right2 = ps4.triggers.y
hat = ps4.hat
# print("PRINT")
# Button Controls
if hat == 1:
# Happy Emotion
print("Arrow Up Pressed")
happy(leds, servos, dome, audio) # namespace.emotions['happy'](leds, servos, mc, audio)
if hat == 8:
# Confused Emotion
print("Arrow Left Pressed")
confused(leds, servos, dome, audio)
if hat == 2:
# Angry Emotion
print("Arrow Right Pressed")
angry(leds, servos, dome, audio)
if hat == 4:
print("Arrow Down Pressed")
if btnSquare == 1:
# word = random_char(2)
audio.speak_random(2)
time.sleep(0.5)
            if btnTriangle == 1:
                # FlashLight ON (assumes GPIO pin 26 was configured elsewhere)
                GPIO.output(26, GPIO.HIGH)
                flashlight.pwm.set_pwm(15, 0, 130)
            if btnCircle == 1:
                # FlashLight OFF
                GPIO.output(26, GPIO.LOW)
                flashlight.pwm.set_pwm(15, 0, 0)
if btnX == 1:
for x in [0, 1, 2, 3, 4, 5, 6, 7]:
for y in [0, 1, 2, 3, 4, 5, 6, 7]:
if x == randint(0, 8) or y == randint(0, 8):
for i in range(1, 5):
leds[i].set(x, y, randint(0, 4))
else:
for i in range(1, 5):
leds[i].set(x, y, 4)
for i in range(1, 5):
leds[i].write()
time.sleep(0.1)
for i in range(1, 5):
leds[i].clear()
if Left1 == 1:
# Dome Motor Forward
dome.speed(3200)
time.sleep(2)
dome.speed(0)
if Right1 == 1:
# Dome Motor Backward
dome.speed(-3200)
time.sleep(2)
dome.speed(0)
# if Left1 == 0 or Right1 == 0:
# # Dome Motor Stop
# dome.speed(0)
# if Left2 > 1:
# # Servo Open
# s0.angle = 0
# s1.angle = 0
# s2.angle = 0
# s3.angle = 0
# s4.angle = 0
# Flash.pwm.set_pwm(15, 0, 3000)
#
# if Right2 > 1:
# # Servo Close
# s0.angle = 130
# s1.angle = 130
# s2.angle = 130
# s3.angle = 130
# s4.angle = 130
# Flash.pwm.set_pwm(15, 0, 130)
            if Left2 > 1:
                for s in servos:
                    s.angle = 0
                    time.sleep(0.25)
                flashlight.pwm.set_pwm(15, 0, 300)
            if Right2 > 1:
                for s in servos:
                    s.angle = 130
                    time.sleep(0.25)
                flashlight.pwm.set_pwm(15, 0, 130)
if btnLeftStickLeftRight < 0.3 and btnLeftStickLeftRight > -0.3:
legs.drive(1, 0)
if btnRightStickUpDown < 0.3 and btnRightStickUpDown > -0.3:
legs.drive(2, 0)
if btnRightStickUpDown >= 0.3:
# Right and Left Motor Forward
legs.drive(1, btnRightStickUpDown*global_LegMotor)
legs.drive(2, btnRightStickUpDown*-global_LegMotor)
if btnRightStickUpDown <= -0.3:
# Right and Left Motor Backward
legs.drive(1, btnRightStickUpDown*global_LegMotor)
legs.drive(2, btnRightStickUpDown*-global_LegMotor)
            # The original thresholds (<= 0.3 and >= -0.3) overlapped the
            # deadzone above, so a centered stick could still command a turn.
            if btnLeftStickLeftRight <= -0.3:
                # Turn Left
                legs.drive(1, btnLeftStickLeftRight*(-global_LegMotor))
                legs.drive(2, btnLeftStickLeftRight*-global_LegMotor)
            if btnLeftStickLeftRight >= 0.3:
                # Turn Right
                legs.drive(1, btnLeftStickLeftRight*(-global_LegMotor))
                legs.drive(2, btnLeftStickLeftRight*-global_LegMotor)
except KeyboardInterrupt:
print('js exiting ...')
return
# exiting, reset all hw
reset_all_hw(hw)
return
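# Illustrative usage sketch (an assumption, not in this file): remote_func
# expects hw to be a dict-like bundle of the hardware objects produced by
# library.factory (see the commented-out factory call above), e.g.
#
#     names = ['leds', 'dome', 'legs', 'servos', 'flashlight', 'audio']
#     hw = dict(zip(names, factory(names)))
#     remote_func(hw, ns)  # runs while ns.current_state == 3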
|
DFEC-R2D2/r2d2
|
pygecko/states/remote.py
|
Python
|
mit
| 11,721
|
from unittest import TestCase
from svnfiltereddump import InterestingPaths
class InterestingPathsTests(TestCase):
def test_empty_string(self):
interesting_path = InterestingPaths()
interesting_path.mark_path_as_interesting('')
self.assertTrue(interesting_path.is_interesting('a'))
self.assertTrue(interesting_path.is_interesting('b'))
dirs = sorted(interesting_path.get_interesting_sub_directories('a'))
self.assertEqual(dirs, ['a'])
dirs = sorted(interesting_path.get_interesting_sub_directories(''))
self.assertEqual(dirs, [''])
def test_simple_include(self):
interesting_path = InterestingPaths()
interesting_path.mark_path_as_interesting('a/b/c')
self.assertTrue(interesting_path.is_interesting('a/b/c'))
self.assertTrue(interesting_path.is_interesting('a/b/c/d'))
self.assertFalse(interesting_path.is_interesting('a/b'))
self.assertFalse(interesting_path.is_interesting('x/y'))
def test_include_with_exclude(self):
interesting_path = InterestingPaths()
interesting_path.mark_path_as_interesting('a/b/c')
interesting_path.mark_path_as_boring('a/b/c/x')
self.assertTrue(interesting_path.is_interesting('a/b/c'))
self.assertTrue(interesting_path.is_interesting('a/b/c/d'))
self.assertFalse(interesting_path.is_interesting('a/b'))
self.assertFalse(interesting_path.is_interesting('x/y'))
self.assertFalse(interesting_path.is_interesting('a/b/c/x'))
self.assertFalse(interesting_path.is_interesting('a/b/c/x/y'))
def test_get_interesting_sub_directories(self):
interesting_path = InterestingPaths()
interesting_path.mark_path_as_interesting('a/b/ca')
interesting_path.mark_path_as_interesting('a/b/cb')
interesting_path.mark_path_as_boring('a/b/ca/x')
interesting_path.mark_path_as_boring('a/y')
dirs = sorted(interesting_path.get_interesting_sub_directories('a/b'))
self.assertEqual(dirs, ['a/b/ca', 'a/b/cb'])
self.assertEqual([], interesting_path.get_interesting_sub_directories("a/b/c/x"))
def test_paths_with_trailing_slashes(self):
interesting_path = InterestingPaths()
interesting_path.mark_path_as_interesting('a/b/')
self.assertTrue(interesting_path.is_interesting('a/b'))
self.assertTrue(interesting_path.is_interesting('a/b/'))
|
TNG/svnfiltereddump
|
tests/TestInterestingPaths.py
|
Python
|
gpl-3.0
| 2,455
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/edit -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_edit
short_description: Modify and idempotently manage openshift objects.
description:
- Modify openshift objects programmatically.
options:
state:
description:
- Currently present is only supported state.
required: true
default: present
choices: ["present"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
kind:
description:
- The kind attribute of the object.
required: True
default: None
choices:
- bc
- buildconfig
- configmaps
- dc
- deploymentconfig
- imagestream
- imagestreamtag
- is
- istag
- namespace
- project
- projects
- node
- ns
- persistentvolume
- pv
- rc
- replicationcontroller
- routes
- scc
- secret
- securitycontextconstraints
- service
- svc
aliases: []
file_name:
description:
    - The name of the file to edit.
required: false
default: None
aliases: []
file_format:
description:
- The format of the file being edited.
required: false
default: yaml
aliases: []
content:
description:
- Content of the file
required: false
default: None
aliases: []
edits:
description:
    - A list of dictionaries, each describing one edit in yedit format.
required: false
default: None
aliases: []
force:
description:
- Whether or not to force the operation
required: false
default: None
aliases: []
separator:
description:
- The separator format for the edit.
required: false
default: '.'
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
oc_edit:
kind: rc
name: hawkular-cassandra-rc
namespace: openshift-infra
content:
spec.template.spec.containers[0].resources.limits.memory: 512
spec.template.spec.containers[0].resources.requests.memory: 256
'''
# -*- -*- -*- End included fragment: doc/edit -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
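    # Illustrative sketch (not part of the upstream module): with the default
    # '.' separator, parse_key yields (list_index, dict_key) pairs, e.g.
    #   Yedit.parse_key('a.b[0].c')
    #   # -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]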
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
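    # Illustrative sketch (not part of the upstream module):
    #   data = {'a': {'b': [1, 2]}}
    #   Yedit.remove_entry(data, 'a.b[0]')  # -> True; data == {'a': {'b': [2]}}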
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add or overwrite an item in a dictionary using key notation a.b.c
            d = {'a': {'b': 'c'}}
            add_entry(d, 'a.z', [1]) -> d == {'a': {'b': 'c', 'z': [1]}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
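    # Illustrative sketch (not part of the upstream module): missing
    # intermediate dictionaries are created on the way down, e.g.
    #   data = {'a': 1}
    #   Yedit.add_entry(data, 'x.y', 2)  # data == {'a': 1, 'x': {'y': 2}}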
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
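    # Illustrative sketch (not part of the upstream module):
    #   d = {'a': {'b': 'c'}}
    #   Yedit.get_entry(d, 'a.b')  # -> 'c'
    #   Yedit.get_entry(d, 'a.x')  # -> None; missing keys are not an error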
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
            raise YeditException('Unsupported content_type: {}. '.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
        # deepcopy didn't work here, so round-trip through ruamel.yaml when
        # available and fall back to deepcopy under plain PyYAML
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
        # When path equals "" it is a special case:
        # "" refers to the root of the document.
        # Only update the root path (entire document) when it's a list or dict.
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
            # deepcopy didn't work here, so round-trip through ruamel.yaml when
            # available and fall back to deepcopy under plain PyYAML
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
try:
# AUDIT:maybe-no-member makes sense due to different yaml libraries
# pylint: disable=maybe-no-member
                curr_value = yaml.load(invalue, Loader=yaml.RoundTripLoader)
except AttributeError:
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
        # A string with a bool value_type must use one of the accepted
        # spellings; the yaml load further down performs the actual conversion
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
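    # Illustrative sketch (not part of the upstream module): without an
    # explicit vtype, string input is yaml-loaded, so numbers and booleans
    # are converted:
    #   Yedit.parse_value('42')            # -> 42
    #   Yedit.parse_value('true', 'bool')  # -> True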
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
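    # Illustrative sketch (not part of the upstream module): each edit is a
    # dict with at least 'key' and 'value'; without an 'action' it is put(),
    # e.g.
    #   yf = Yedit(content={'spec': {'replicas': 1}})
    #   Yedit.process_edits([{'key': 'spec.replicas', 'value': 3}], yf)
    #   # -> {'changed': True, 'results': [...]}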
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
                    'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
            # If we were passed a key and value, we
            # encapsulate them in a list and process them as edits.
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
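    # Hedged sketch of the dispatch above (illustrative only):
    #   self._delete('pod', name='mypod')            -> oc delete pod mypod
    #   self._delete('pod', selector='app=frontend') -> oc delete pod --selector=app=frontend
    #   self._delete('pod')                          -> raises OpenShiftCLIError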
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
        template_data: the template content itself, passed via stdin instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
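# Hedged sketch of the command assembly in openshift_cmd (illustrative only):
# with namespace='default', openshift_cmd(['get', 'pods'], output=True) runs
# roughly ['oc', 'get', 'pods', '-n', 'default'] and returns a dict of the
# form {'returncode': 0, 'cmd': '...', 'results': <parsed JSON>}.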
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
        with open(inc_file) as ifd:
            Utils._write(tmpfile, ifd.read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
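    # Hedged worked example for add_custom_versions (illustrative only):
    #   {'oc': 'v3.6.173.0.5-3'} -> {'oc_numeric': '3.6.173.0.5',
    #                                'oc_short': '3.6'}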
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            if debug:
                                print('list compare returned false')
                            return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
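# Hedged sketch for check_def_equal (illustrative only): 'metadata' and
# 'status' are skipped by default, so a user definition matches a live
# object that differs only in those autogenerated sections.
def _example_check_def_equal():
    # hypothetical helper
    user = {'spec': {'replicas': 2}}
    live = {'spec': {'replicas': 2}, 'status': {'ready': True}}
    return Utils.check_def_equal(user, live)  # -> True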
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
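# Hedged sketch for OpenShiftCLIConfig (illustrative only): every option is a
# dict carrying 'value' and 'include'; stringify() renders the included,
# non-None options as '--key=value' flags with underscores dashed.
def _example_config_flags():
    # hypothetical helper; the option names here are made up
    opts = {'service_account': {'value': 'builder', 'include': True},
            'node_selector': {'value': None, 'include': True}}
    config = OpenShiftCLIConfig('myname', 'default', '/tmp/kubeconfig', opts)
    return config.stringify()  # -> ['--service-account=builder']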
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_edit.py -*- -*- -*-
class Edit(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
resource_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
self.separator = separator
def get(self):
        '''return a resource by name '''
return self._get(self.kind, self.name)
def update(self, file_name, content, edits, force=False, content_type='yaml'):
'''run update '''
if file_name:
            if content_type == 'yaml':
                with open(file_name) as sfd:
                    # prefer ruamel's round-trip loader when available, as elsewhere in this module
                    if hasattr(yaml, 'RoundTripLoader'):
                        data = yaml.load(sfd, yaml.RoundTripLoader)
                    else:
                        data = yaml.safe_load(sfd)
            elif content_type == 'json':
                with open(file_name) as sfd:
                    data = json.loads(sfd.read())
yed = Yedit(filename=file_name, content=data, separator=self.separator)
# Keep this for compatibility
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if not results['changed']:
return results
yed.write()
atexit.register(Utils.cleanup, [file_name])
return self._replace(file_name, force=force)
return self._replace_content(self.kind, self.name, content, edits, force=force, sep=self.separator)
@staticmethod
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocedit = Edit(params['kind'],
params['namespace'],
params['name'],
kubeconfig=params['kubeconfig'],
separator=params['separator'],
verbose=params['debug'])
api_rval = ocedit.get()
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
return {"failed": True, 'msg': api_rval}
########
# Update
########
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
api_rval = ocedit.update(params['file_name'],
params['content'],
params['edits'],
params['force'],
params['file_format'])
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
if 'updated' in api_rval and not api_rval['updated']:
return {"changed": False, 'results': api_rval, 'state': 'present'}
# return the created object
api_rval = ocedit.get()
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
return {"changed": True, 'results': api_rval, 'state': 'present'}
# -*- -*- -*- End included fragment: class/oc_edit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_edit.py -*- -*- -*-
def main():
'''
ansible oc module for editing objects
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kind=dict(required=True, type='str'),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, type='dict'),
force=dict(default=False, type='bool'),
separator=dict(default='.', type='str'),
edits=dict(default=None, type='list'),
),
supports_check_mode=True,
mutually_exclusive=[['content', 'edits']],
required_one_of=[['content', 'edits']],
)
rval = Edit.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_edit.py -*- -*- -*-
|
wbrefvem/openshift-ansible
|
roles/lib_openshift/library/oc_edit.py
|
Python
|
apache-2.0
| 55,673
|
from typing import Optional, Dict, Union, List
from .singleton import Singleton
class Arguments(metaclass=Singleton):
"""
Arguments singleton
"""
class Name:
SETTINGS_FILE: str = 'settings_file'
SETTINGS: str = 'settings'
INIT: str = 'init'
VERBOSE: str = 'verbose'
QUIET: str = 'quiet'
PREVIEW: str = 'preview'
FORMATS: str = 'formats'
VERSION: str = 'version'
OUTPUT: str = 'output'
CLIENT_ID: str = 'client_id'
CLIENT_SECRET: str = 'client_secret'
CHANNEL: str = 'channel'
USER: str = 'user'
INCLUDES: str = 'includes'
FIRST: str = 'first'
VIDEO: str = 'video'
FORMAT: str = 'format'
TIMEZONE: str = 'timezone'
DEBUG: str = 'debug'
LOG: str = 'log'
def __init__(self, arguments: Optional[Dict[str, Union[str, bool, int]]] = None):
"""
Initialize arguments
:param arguments: Arguments from cli (Optional to call singleton instance without parameters)
"""
if arguments is None:
print('Error: arguments were not provided')
exit()
# Required arguments and booleans
self.settings_file: str = arguments[Arguments.Name.SETTINGS_FILE]
self.settings: str = arguments[Arguments.Name.SETTINGS]
self.init: bool = arguments[Arguments.Name.INIT]
self.verbose: bool = arguments[Arguments.Name.VERBOSE]
self.debug: bool = arguments[Arguments.Name.DEBUG]
self.quiet: bool = arguments[Arguments.Name.QUIET]
self.preview: bool = arguments[Arguments.Name.PREVIEW]
self.print_formats: bool = arguments[Arguments.Name.FORMATS]
self.print_version: bool = arguments[Arguments.Name.VERSION]
self.output: str = arguments[Arguments.Name.OUTPUT]
self.log: bool = arguments[Arguments.Name.LOG]
# Optional or prompted arguments
self.client_id: Optional[str] = arguments[Arguments.Name.CLIENT_ID]
self.client_secret: Optional[str] = arguments[Arguments.Name.CLIENT_SECRET]
self.oauth_token: Optional[str] = None
self.first: Optional[int] = arguments[Arguments.Name.FIRST]
self.timezone: Optional[str] = arguments[Arguments.Name.TIMEZONE]
self.includes: Optional[str] = arguments[Arguments.Name.INCLUDES]
# Arguments that require some formatting
self.video_ids: List[int] = []
self.formats: List[str] = []
self.channels: List[str] = []
self.users: List[str] = []
# Videos
if arguments[Arguments.Name.VIDEO]:
self.video_ids = [int(video_id) for video_id in arguments[Arguments.Name.VIDEO].lower().split(',')]
# Formats
if arguments[Arguments.Name.FORMAT]:
            self.formats = arguments[Arguments.Name.FORMAT].lower().split(',')
# Channels
if arguments[Arguments.Name.CHANNEL]:
self.channels = arguments[Arguments.Name.CHANNEL].lower().split(',')
# Users
if arguments[Arguments.Name.USER]:
self.users = arguments[Arguments.Name.USER].lower().split(',')
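# Hedged usage sketch (illustrative only): Arguments is a Singleton, so the
# first call must receive the full CLI dict (every Arguments.Name key); later
# calls such as Arguments() return that same parsed instance, e.g.
#   Arguments({Arguments.Name.VIDEO: '123,456', ...})  # first call
#   Arguments().video_ids                              # -> [123, 456]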
|
PetterKraabol/Twitch-Chat-Downloader
|
tcd/arguments.py
|
Python
|
mit
| 3,199
|
from south.db import db
from django.db import models
from ietf.ietfworkflows.models import *
class Migration:
def forwards(self, orm):
# Adding model 'StateDescription'
db.create_table('ietfworkflows_statedescription', (
('id', orm['ietfworkflows.statedescription:id']),
('state', orm['ietfworkflows.statedescription:state']),
('definition', orm['ietfworkflows.statedescription:definition']),
('order', orm['ietfworkflows.statedescription:order']),
))
db.send_create_signal('ietfworkflows', ['StateDescription'])
def backwards(self, orm):
# Deleting model 'StateDescription'
db.delete_table('ietfworkflows_statedescription')
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'idtracker.acronym': {
'Meta': {'db_table': "'acronym'"},
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'acronym_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_key': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'idtracker.idintendedstatus': {
'Meta': {'db_table': "'id_intended_status'"},
'intended_status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'intended_status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.idstatus': {
'Meta': {'db_table': "'id_status'"},
'status': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_column': "'status_value'"}),
'status_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'idtracker.internetdraft': {
'Meta': {'db_table': "'internet_drafts'"},
'abstract': ('django.db.models.fields.TextField', [], {}),
'b_approve_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'b_discussion_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'b_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dunn_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'expired_tombstone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'extension_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'file_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.Acronym']", 'db_column': "'group_acronym_id'"}),
'id_document_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id_document_tag': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDIntendedStatus']"}),
'last_modified_date': ('django.db.models.fields.DateField', [], {}),
'lc_changes': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
'lc_expiration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'lc_sent_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'replaced_by': ('django.db.models.fields.related.ForeignKey', ["orm['idtracker.InternetDraft']"], {'related_name': "'replaces_set'", 'null': 'True', 'db_column': "'replaced_by'", 'blank': 'True'}),
'review_by_rfc_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'revision': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'revision_date': ('django.db.models.fields.DateField', [], {}),
'rfc_number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']", 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.IDStatus']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'id_document_name'"}),
'txt_page_count': ('django.db.models.fields.IntegerField', [], {}),
'wgreturn_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'idtracker.personororginfo': {
'Meta': {'db_table': "'person_or_org_info'"},
'address_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'first_name_key': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'last_name_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'middle_initial': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'middle_initial_key': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'person_or_org_tag': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'record_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'ietfworkflows.annotationtag': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['permissions.Permission']", 'null': 'True', 'blank': 'True'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'annotation_tags'", 'to': "orm['workflows.Workflow']"})
},
'ietfworkflows.annotationtagobjectrelation': {
'annotation_tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ietfworkflows.AnnotationTag']"}),
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'annotation_tags'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'ietfworkflows.objectannotationtaghistoryentry': {
'objecthistoryentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ietfworkflows.ObjectHistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'setted': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'unsetted': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'ietfworkflows.objecthistoryentry': {
'comment': ('django.db.models.fields.TextField', [], {}),
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'workflow_history'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idtracker.PersonOrOrgInfo']"})
},
'ietfworkflows.objectstreamhistoryentry': {
'from_stream': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'objecthistoryentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ietfworkflows.ObjectHistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'to_stream': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'ietfworkflows.objectworkflowhistoryentry': {
'from_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'objecthistoryentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ietfworkflows.ObjectHistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'to_state': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ietfworkflows.statedescription': {
'definition': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.State']"})
},
'ietfworkflows.stateobjectrelationmetadata': {
'estimated_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'from_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.StateObjectRelation']"})
},
'ietfworkflows.stream': {
'group_chair_model': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'group_model': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'with_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ietfworkflows.WGWorkflow']"})
},
'ietfworkflows.streamedid': {
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'streamed_id'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'draft': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['idtracker.InternetDraft']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ietfworkflows.Stream']", 'null': 'True', 'blank': 'True'})
},
'ietfworkflows.wgworkflow': {
'selected_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['workflows.State']", 'null': 'True', 'blank': 'True'}),
'selected_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ietfworkflows.AnnotationTag']", 'null': 'True', 'blank': 'True'}),
'workflow_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['workflows.Workflow']", 'unique': 'True', 'primary_key': 'True'})
},
'permissions.permission': {
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'workflows.state': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'transitions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['workflows.Transition']", 'null': 'True', 'blank': 'True'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'states'", 'to': "orm['workflows.Workflow']"})
},
'workflows.stateobjectrelation': {
'Meta': {'unique_together': "(('content_type', 'content_id', 'state'),)"},
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'state_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.State']"})
},
'workflows.transition': {
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destination_state'", 'null': 'True', 'to': "orm['workflows.State']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['permissions.Permission']", 'null': 'True', 'blank': 'True'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transitions'", 'to': "orm['workflows.Workflow']"})
},
'workflows.workflow': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_state': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'workflow_state'", 'null': 'True', 'to': "orm['workflows.State']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['permissions.Permission']", 'symmetrical': 'False'})
}
}
complete_apps = ['ietfworkflows']
|
mcr/ietfdb
|
ietf/ietfworkflows/migrations/0010_add_state_definitions.py
|
Python
|
bsd-3-clause
| 16,914
|
import subprocess
from six import iteritems
def run_nastran(fname, keywords=None):
"""
Call a nastran subprocess with the given filename
Parameters
    ----------
fname : string
Filename of the Nastran .bdf file
keywords : dict/list of strings, optional
Default keywords are `'scr=yes'`, `'bat=no'`, `'old=no'`, and `'news=no'`
"""
if keywords is None:
        keywords_list = ['scr=yes', 'bat=no', 'old=no', 'news=no']  # 'mem=1024mb',
else:
if isinstance(keywords, (list, tuple)):
keywords_list = keywords
else:
keywords_list = []
for keyword, value in iteritems(keywords):
if value is None:
continue
keywords_list.append('%s=%s' % (keyword, value))
call_args = ['nastran', fname] + keywords_list
return subprocess.call(call_args)
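# Hedged usage sketch (illustrative only): with no keywords the defaults are
# used; a dict is rendered into 'key=value' arguments, skipping None values.
#   run_nastran('model.bdf')
#   run_nastran('model.bdf', keywords={'mem': '1024mb', 'old': 'no'})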
|
saullocastro/pyNastran
|
pyNastran/utils/nastran_utils.py
|
Python
|
lgpl-3.0
| 896
|
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'webitec2017@gmail.com'  # insert email
EMAIL_HOST_PASSWORD = 'Web!tec#2017'  # insert password
EMAIL_PORT = 587
|
emanuelcovaci/pythonic
|
pythonic/local.py
|
Python
|
gpl-3.0
| 146
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.openstack.common import log as logging
from trove.common import extensions
from trove.common import wsgi
from trove.extensions.account import service
LOG = logging.getLogger(__name__)
class Account(extensions.ExtensionsDescriptor):
def get_name(self):
return "Account"
def get_description(self):
return "Account information with instances"
def get_alias(self):
return "Account"
def get_namespace(self):
return "http://TBD"
def get_updated(self):
return "2012-06-07T13:25:27-06:00"
def get_resources(self):
resources = []
serializer = wsgi.TroveResponseSerializer(
body_serializers={'application/xml':
wsgi.TroveXMLDictSerializer()})
resource = extensions.ResourceExtension(
'{tenant_id}/mgmt/accounts',
service.AccountController(),
deserializer=wsgi.RequestDeserializer(),
serializer=serializer)
resources.append(resource)
return resources
|
citrix-openstack-build/trove
|
trove/extensions/routes/account.py
|
Python
|
apache-2.0
| 1,693
|
import unittest
from streamlink.plugins.rtvs import Rtvs
class TestPluginRtvs(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'http://www.rtvs.sk/televizia/live-1',
'http://www.rtvs.sk/televizia/live-2',
'http://www.rtvs.sk/televizia/live-o',
]
for url in should_match:
self.assertTrue(Rtvs.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'http://www.rtvs.sk/',
]
for url in should_not_match:
self.assertFalse(Rtvs.can_handle_url(url))
|
back-to/streamlink
|
tests/plugins/test_rtvs.py
|
Python
|
bsd-2-clause
| 622
|
from runtime import *
'''
inspect locals of a function at runtime for debugging
'''
@locals
def myfunc(value='bar'):
x = 1
y = {foo:value}
@locals
def nested():
z = 'FOO'
return value + 'NESTED'
return nested()
def main():
print myfunc.locals
print myfunc()
print myfunc.locals
assert myfunc.locals.x == 1
assert myfunc.locals.y.foo=='bar'
myfunc(value='X')
print myfunc.locals
assert myfunc.locals.y.foo=='X'
print myfunc.locals.nested.locals.z
assert myfunc.locals.nested.locals.z=='FOO'
main()
|
pombredanne/Rusthon
|
regtests/lang/func_locals.py
|
Python
|
bsd-3-clause
| 521
|
import re
from django import template
from django.template.loader import get_template
from django.template import RequestContext
register = template.Library()
INSTALLED_ARTIFACTS = dict()
def install(artifact_class):
INSTALLED_ARTIFACTS[artifact_class.key] = artifact_class
def find(data):
from fir_artifacts.models import ArtifactBlacklistItem
result = dict()
for key in INSTALLED_ARTIFACTS:
blacklist = ArtifactBlacklistItem.objects.filter(type=key).values_list('value', flat=True)
values = INSTALLED_ARTIFACTS[key].find(data)
values = [v for v in values if v not in blacklist]
result[key] = values
return result
def after_save(type, value, event):
return INSTALLED_ARTIFACTS[type].after_save(value, event)
def incs_for_art(art_string):
from fir_artifacts.models import Artifact
artifacts = Artifact.objects.filter(value__contains=art_string)
incs = []
for a in artifacts:
incs.extend(a.relations.all())
return incs
def all_for_object(obj, raw=False, user=None):
result = []
total_count = 0
correlated_count = 0
if not hasattr(obj, "artifacts"):
return (result, total_count, correlated_count)
for artifact in INSTALLED_ARTIFACTS:
values = obj.artifacts.filter(type=artifact)
artifact_collection = INSTALLED_ARTIFACTS[artifact](values, obj, user=user)
total_count += values.count()
correlated_count += artifact_collection.correlated_count()
result.append(artifact_collection)
return (result, total_count, correlated_count)
class AbstractArtifact:
case_sensitive = False
template = 'fir_artifacts/default.html'
@classmethod
def find(cls, data):
results = []
for i in re.finditer(cls.regex, data):
if cls.case_sensitive:
results.append(i.group('search'))
else:
results.append(i.group('search').lower())
return results
@classmethod
def after_save(cls, value, event):
# Do nothing, allows for specific callback in subclasses
pass
def __init__(self, artifacts, event, user=None):
class ArtifactDisplay(object):
def __init__(self, artifact, user):
self.artifact = artifact
self.correlation_count = self.artifact.relations_for_user(user).count()
@property
def value(self):
return self.artifact.value
@property
def type(self):
return self.artifact.type
@property
def id(self):
return self.artifact.id
@property
def pk(self):
return self.artifact.pk
self._artifacts = [ArtifactDisplay(artifact, user) for artifact in artifacts]
self._event = event
self._correlated = []
for artifact in self._artifacts:
if artifact.correlation_count > 1:
self._correlated.append(artifact)
def json(self, request):
return self.display(request, correlated=False, json=True)
def display(self, request, correlated=False, json=False):
context = RequestContext(request)
template = get_template(self.__class__.template)
context['artifact_name'] = self.__class__.display_name
if correlated:
context['artifact_values'] = self._correlated
else:
context['artifact_values'] = self._artifacts
context['event'] = self._event
if not json:
return template.render(context.flatten(), request)
else:
return context.flatten()
def correlated_count(self):
return len(self._correlated)
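# Hedged sketch (hypothetical, not part of FIR): a concrete artifact supplies
# a `key`, a `display_name` and a `regex` with a named group 'search', then
# registers itself so find() can extract its values from free text.
class ExampleIPArtifact(AbstractArtifact):
    key = 'ip'
    display_name = 'IP addresses'
    regex = r'(?P<search>(?:\d{1,3}\.){3}\d{1,3})'
# install(ExampleIPArtifact)  # uncommenting would add it to INSTALLED_ARTIFACTS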
|
gcrahay/FIR
|
fir_artifacts/artifacts.py
|
Python
|
gpl-3.0
| 3,764
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import numpy as np
import shutil
from pymatgen.io.vaspio_set import MITVaspInputSet, MITHSEVaspInputSet, \
MPVaspInputSet, MITGGAVaspInputSet, MITNEBVaspInputSet,\
MPStaticVaspInputSet, MPNonSCFVaspInputSet, MITMDVaspInputSet,\
MPHSEVaspInputSet, MPBSHSEVaspInputSet, MPStaticDielectricDFPTVaspInputSet,\
MPOpticsNonSCFVaspInputSet
from pymatgen.io.vaspio.vasp_input import Poscar, Incar
from pymatgen import Specie, Lattice, Structure
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
dec = MontyDecoder()
class MITMPVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitparamset = MITVaspInputSet()
self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
self.mitggaparam = MITGGAVaspInputSet()
self.mpstaticparamset = MPStaticVaspInputSet()
self.mpnscfparamsetu = MPNonSCFVaspInputSet(
{"NBANDS": 50}, mode="Uniform")
self.mpnscfparamsetl = MPNonSCFVaspInputSet(
{"NBANDS": 60}, mode="Line")
self.mphseparamset = MPHSEVaspInputSet()
self.mpbshseparamsetl = MPBSHSEVaspInputSet(mode="Line")
self.mpbshseparamsetu = MPBSHSEVaspInputSet(
mode="Uniform", added_kpoints=[[0.5, 0.5, 0.0]])
self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()
def test_get_poscar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure
s_sorted = self.mitparamset.get_poscar(struct).structure
self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')
self.assertEqual(s_sorted[0].specie.symbol, 'Mn')
def test_get_potcar_symbols(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
syms = self.paramset.get_potcar_symbols(struct)
self.assertEqual(syms, ['Fe_pv', 'P', 'O'])
syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)
self.assertEqual(syms, ['P', 'Fe_pv', 'O'])
def test_false_potcar_hash(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'
self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'
def test_lda_potcar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe"], coords)
p = MITVaspInputSet(potcar_functional="LDA").get_potcar(struct)
self.assertEqual(p.functional, 'LDA')
def test_get_nelect(self):
coords = [[0]*3, [0.5]*3, [0.75]*3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)
self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)
def test_get_incar(self):
incar = self.paramset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [5.3, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar = self.mitparamset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar_gga = self.mitggaparam.get_incar(self.struct)
self.assertNotIn("LDAU", incar_gga)
incar_static = self.mpstaticparamset.get_incar(self.struct)
self.assertEqual(incar_static["NSW"], 0)
incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)
self.assertEqual(incar_nscfl["NBANDS"], 60)
incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)
self.assertEqual(incar_nscfu["ISYM"], 0)
incar_hse = self.mphseparamset.get_incar(self.struct)
self.assertEqual(incar_hse['LHFCALC'], True)
self.assertEqual(incar_hse['HFSCREEN'], 0.2)
incar_hse_bsl = self.mpbshseparamsetl.get_incar(self.struct)
self.assertEqual(incar_hse_bsl['LHFCALC'], True)
self.assertEqual(incar_hse_bsl['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsl['NSW'], 0)
incar_hse_bsu = self.mpbshseparamsetu.get_incar(self.struct)
self.assertEqual(incar_hse_bsu['LHFCALC'], True)
self.assertEqual(incar_hse_bsu['HFSCREEN'], 0.2)
self.assertEqual(incar_hse_bsu['NSW'], 0)
incar_diel = self.mpdielparamset.get_incar(self.struct)
self.assertEqual(incar_diel['IBRION'], 8)
self.assertEqual(incar_diel['LEPSILON'], True)
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
#Silicon structure for testing.
latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(latt, [si, si], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn("LDAU", incar)
incar = self.mithseparamset.get_incar(self.struct)
self.assertTrue(incar['LHFCALC'])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn('LDAU', incar)
#check fluorides
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
self.assertEqual(incar['MAGMOM'], [5, 0.6])
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0])
#Make sure this works with species.
struct = Structure(lattice, ["Fe2+", "O2-"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
struct = Structure(lattice, ["Fe", "Mn"], coords,
site_properties={'magmom': (5.2, -4.5)})
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mpstaticparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mitparamset_unsorted.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5.2, -4.5])
struct = Structure(lattice, [Specie("Fe", 2, {'spin': 4.1}), "Mn"],
coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5, 4.1])
incar = self.mpnscfparamsetl.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [4, 3])
incar = self.mpnscfparamsetu.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[100, 0.6])
#sulfide vs sulfate test
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.25, 0.5, 0])
struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [1.9, 0])
        #Make sure Materials Project sulfides are ok.
self.assertNotIn('LDAUU', self.paramset.get_incar(struct))
self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))
struct = Structure(lattice, ["Fe", "S", "O"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
        #Make sure Materials Project sulfates are ok.
self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])
self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],
[5.3, 0, 0])
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[10, -5, 0.6])
def test_optics(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(
'{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',
nedos=1145)
self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))
incar = Incar.from_file('optics_test_dir/INCAR')
self.assertTrue(incar['LOPTICS'])
self.assertEqual(incar['NEDOS'], 1145)
#Remove the directory in which the inputs have been created
shutil.rmtree('optics_test_dir')
def test_get_kpoints(self):
kpoints = self.paramset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mitparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpstaticparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[6, 6, 4]])
self.assertEqual(kpoints.style, 'Monkhorst')
kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 140)
self.assertEqual(kpoints.style, 'Reciprocal')
kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 168)
kpoints = self.mpbshseparamsetl.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 164)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.66006924)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.51780182)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.30173482)
kpoints = self.mpbshseparamsetu.get_kpoints(self.struct)
self.assertAlmostEqual(kpoints.num_kpts, 25)
self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
self.assertAlmostEqual(kpoints.kpts[-1][2], 0.0)
def test_get_all_vasp_input(self):
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], -5)
self.struct.make_supercell(4)
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], 0)
def test_to_from_dict(self):
self.mitparamset = MITVaspInputSet()
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
d = self.mitparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 4)
d = self.mitggaparam.as_dict()
v = dec.process_decoded(d)
self.assertNotIn("LDAUU", v.incar_settings)
d = self.mithseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.mphseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.paramset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 5.3)
d = self.userparamset.as_dict()
v = dec.process_decoded(d)
#self.assertEqual(type(v), MPVaspInputSet)
self.assertEqual(v.incar_settings["MAGMOM"],
{"Fe": 10, "S": -5, "Mn3+": 100})
class MITMDVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitmdparam = MITMDVaspInputSet(300, 1200, 10000)
def test_get_potcar_symbols(self):
syms = self.mitmdparam.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.mitmdparam.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)
def test_get_kpoints(self):
kpoints = self.mitmdparam.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, 'Gamma')
def test_to_from_dict(self):
d = self.mitmdparam.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MITMDVaspInputSet)
self.assertEqual(v.incar_settings["TEBEG"], 300)
class MITNEBVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.vis = MITNEBVaspInputSet(nimages=10, hubbard_off=True)
def test_get_potcar_symbols(self):
syms = self.vis.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.vis.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 0.00005)
def test_get_kpoints(self):
kpoints = self.vis.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, 'Monkhorst')
def test_to_from_dict(self):
d = self.vis.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["IMAGES"], 10)
def test_write_inputs(self):
c1 = [[0.5] * 3, [0.9] * 3]
c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
s1 = Structure(Lattice.cubic(5), ['Si', 'Si'], c1)
s2 = Structure(Lattice.cubic(5), ['Si', 'Si'], c2)
structs = []
for s in s1.interpolate(s2, 3, pbc=True):
structs.append(Structure.from_sites(s.sites,
to_unit_cell=True))
fc = self.vis._process_structures(structs)[2].frac_coords
self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))
if __name__ == '__main__':
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/io/tests/test_vaspio_set.py
|
Python
|
mit
| 16,493
|
# -*- coding: utf-8 -*-
"""
Parse and stream Bitcoin blocks as either Block or BlockHeader structures.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import struct
import io
from .encoding import double_sha256
from .merkle import merkle
from .serialize.bitcoin_streamer import parse_struct, stream_struct
from .serialize import b2h, b2h_rev
from .tx import Tx
class BadMerkleRootError(Exception):
pass
def difficulty_max_mask_for_bits(bits):
prefix = bits >> 24
mask = (bits & 0x7ffff) << (8 * (prefix - 3))
return mask
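# Illustrative example (editorial addition, not in the original module):
# decoding the genesis block's compact difficulty, bits = 0x1d00ffff. The
# top byte (0x1d) is a byte-length exponent and the low three bytes are the
# mantissa, so the target is 0xffff shifted left by (0x1d - 3) bytes:
#
#   >>> hex(difficulty_max_mask_for_bits(0x1d00ffff))
#   '0xffff0000000000000000000000000000000000000000000000000000'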
class BlockHeader(object):
"""A BlockHeader is a block with the transaction data removed. With a
complete Merkle tree database, it can be reconstructed from the
merkle_root."""
    @classmethod
    def parse(cls, f):
        """Parse the BlockHeader from the file-like object in the standard way
        that blocks are sent in the network (well, except we ignore the
        transaction information)."""
        (version, previous_block_hash, merkle_root,
         timestamp, difficulty, nonce) = struct.unpack("<L32s32sLLL", f.read(4+32+32+4*3))
        return cls(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce)
def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce):
self.version = version
self.previous_block_hash = previous_block_hash
self.merkle_root = merkle_root
self.timestamp = timestamp
self.difficulty = difficulty
self.nonce = nonce
    def set_nonce(self, nonce):
        self.nonce = nonce
        # Invalidate the cached hash. The original checked
        # hasattr(self, "__hash"), which never matches the name-mangled
        # attribute, so the cache was never hit or cleared.
        self._hash = None
    def hash(self):
        """Calculate the hash for the block header. Note that this has the bytes
        in the opposite order from how the header is usually displayed (so the
        long string of 00 bytes is at the end, not the beginning)."""
        if getattr(self, "_hash", None) is None:
            s = io.BytesIO()
            self.stream_header(s)
            self._hash = double_sha256(s.getvalue())
        return self._hash
def stream_header(self, f):
"""Stream the block header in the standard way to the file-like object f."""
stream_struct("L##LLL", f, self.version, self.previous_block_hash,
self.merkle_root, self.timestamp, self.difficulty, self.nonce)
def stream(self, f):
"""Stream the block header in the standard way to the file-like object f.
The Block subclass also includes the transactions."""
self.stream_header(f)
def as_bin(self):
"""Return the transaction as binary."""
f = io.BytesIO()
self.stream(f)
return f.getvalue()
def as_hex(self):
"""Return the transaction as hex."""
return b2h(self.as_bin())
def id(self):
"""Returns the hash of the block displayed with the bytes in the order
they are usually displayed in."""
return b2h_rev(self.hash())
def previous_block_id(self):
"""Returns the hash of the previous block, with the bytes in the order
they are usually displayed in."""
return b2h_rev(self.previous_block_hash)
def __str__(self):
return "BlockHeader [%s] (previous %s)" % (self.id(), self.previous_block_id())
def __repr__(self):
return "BlockHeader [%s] (previous %s)" % (self.id(), self.previous_block_id())
class Block(BlockHeader):
"""A Block is an element of the Bitcoin chain. Generating a block
yields a reward!"""
@classmethod
    def parse(cls, f, include_offsets=None):
"""Parse the Block from the file-like object in the standard way
that blocks are sent in the network."""
if include_offsets is None:
include_offsets = hasattr(f, "tell")
(version, previous_block_hash, merkle_root, timestamp,
difficulty, nonce, count) = parse_struct("L##LLLI", f)
txs = []
for i in range(count):
if include_offsets:
offset_in_block = f.tell()
tx = Tx.parse(f)
txs.append(tx)
if include_offsets:
tx.offset_in_block = offset_in_block
        block = cls(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce, txs)
for tx in txs:
tx.block = block
return block
    @classmethod
    def from_bin(cls, data):
        """Return a Block parsed from the given binary data."""
        f = io.BytesIO(data)
        return cls.parse(f)
def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce, txs):
self.version = version
self.previous_block_hash = previous_block_hash
self.merkle_root = merkle_root
self.timestamp = timestamp
self.difficulty = difficulty
self.nonce = nonce
self.txs = txs
def as_blockheader(self):
return BlockHeader(self.version, self.previous_block_hash, self.merkle_root,
self.timestamp, self.difficulty, self.nonce)
def stream(self, f):
"""Stream the block in the standard way to the file-like object f."""
stream_struct("L##LLLI", f, self.version, self.previous_block_hash,
self.merkle_root, self.timestamp, self.difficulty, self.nonce, len(self.txs))
for t in self.txs:
t.stream(f)
def as_bin(self):
"""Return the transaction as binary."""
f = io.BytesIO()
self.stream(f)
return f.getvalue()
def as_hex(self):
"""Return the transaction as hex."""
return b2h(self.as_bin())
def check_merkle_hash(self):
"""Raise a BadMerkleRootError if the Merkle hash of the
transactions does not match the Merkle hash included in the block."""
calculated_hash = merkle([tx.hash() for tx in self.txs], double_sha256)
if calculated_hash != self.merkle_root:
raise BadMerkleRootError(
"calculated %s but block contains %s" % (b2h(calculated_hash), b2h(self.merkle_root)))
def __str__(self):
return "Block [%s] (previous %s) [tx count: %d]" % (
self.id(), self.previous_block_id(), len(self.txs))
def __repr__(self):
return "Block [%s] (previous %s) [tx count: %d] %s" % (
self.id(), self.previous_block_id(), len(self.txs), self.txs)
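# Illustrative usage sketch (editorial addition, not in the original module).
# `block_hex` is a placeholder for a real serialized block; h2b is the
# hex-to-bytes helper from pycoin.serialize:
#
#   from pycoin.serialize import h2b
#   block = Block.from_bin(h2b(block_hex))
#   assert block.as_hex() == block_hex       # stream() inverts parse()
#   block.check_merkle_hash()                # raises BadMerkleRootError on mismatch
#   print(block.id(), len(block.txs))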
|
moocowmoo/pycoin
|
pycoin/block.py
|
Python
|
mit
| 7,399
|
from nose.tools import * # flake8: noqa
import functools
from tests.base import ApiTestCase
from website.project.licenses import NodeLicense
from website.project.licenses import ensure_licenses
from api.base.settings.defaults import API_BASE
ensure_licenses = functools.partial(ensure_licenses, warn=False)
class TestLicenseList(ApiTestCase):
def setUp(self):
super(TestLicenseList, self).setUp()
ensure_licenses()
self.licenses = NodeLicense.find()
def test_license_list_success(self):
url = '/{}licenses/'.format(API_BASE)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_license_list_count_correct(self):
url = '/{}licenses/'.format(API_BASE)
res = self.app.get(url)
total = res.json['links']['meta']['total']
assert_equal(total, self.licenses.count())
def test_license_list_name_filter(self):
license = self.licenses[0]
name = license.name
url = '/{}licenses/?filter[name]={}'.format(API_BASE, name)
res = self.app.get(url)
data = res.json['data'][0]
assert_equal(data['attributes']['name'], name)
assert_equal(data['id'], license._id)
def test_license_list_id_filter(self):
license = self.licenses[0]
id = license._id
url = '/{}licenses/?filter[id]={}'.format(API_BASE, id)
res = self.app.get(url)
data = res.json['data'][0]
assert_equal(data['attributes']['name'], license.name)
assert_equal(data['id'], id)
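    # Editorial sketch (not in the original suite): filtering on a name that is
    # assumed absent from the fixture licenses should return an empty data list.
    def test_license_list_name_filter_no_match(self):
        url = '/{}licenses/?filter[name]={}'.format(API_BASE, 'Nonexistent License')
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data'], [])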
|
monikagrabowska/osf.io
|
api_tests/licenses/views/test_license_list.py
|
Python
|
apache-2.0
| 1,621
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
"""
# pytype: skip-file
import itertools
import logging
import time
import typing
from google.protobuf import wrappers_pb2
import apache_beam as beam
from apache_beam import coders
from apache_beam import typehints
from apache_beam.internal.util import ArgumentPlaceholder
from apache_beam.options.pipeline_options import DirectOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import PCollection
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.direct.clock import RealClock
from apache_beam.runners.direct.clock import TestClock
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.transforms import userstate
from apache_beam.transforms.core import CombinePerKey
from apache_beam.transforms.core import CombineValuesDoFn
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.typehints import trivial_inference
# Note that the BundleBasedDirectRunner and SwitchingDirectRunner names are
# experimental and have no backwards compatibility guarantees.
__all__ = ['BundleBasedDirectRunner', 'DirectRunner', 'SwitchingDirectRunner']
_LOGGER = logging.getLogger(__name__)
class SwitchingDirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine.
This implementation switches between using the FnApiRunner (which has
high throughput for batch jobs) and using the BundleBasedDirectRunner,
which supports streaming execution and certain primitives not yet
implemented in the FnApiRunner.
"""
def is_fnapi_compatible(self):
return BundleBasedDirectRunner.is_fnapi_compatible()
def run_pipeline(self, pipeline, options):
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.dataflow.native_io.iobase import _NativeWrite
from apache_beam.testing.test_stream import TestStream
class _FnApiRunnerSupportVisitor(PipelineVisitor):
"""Visitor determining if a Pipeline can be run on the FnApiRunner."""
def accept(self, pipeline):
self.supported_by_fnapi_runner = True
pipeline.visit(self)
return self.supported_by_fnapi_runner
def visit_transform(self, applied_ptransform):
transform = applied_ptransform.transform
# The FnApiRunner does not support streaming execution.
if isinstance(transform, TestStream):
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support reads from NativeSources.
if (isinstance(transform, beam.io.Read) and
isinstance(transform.source, NativeSource)):
self.supported_by_fnapi_runner = False
# The FnApiRunner does not support the use of _NativeWrites.
if isinstance(transform, _NativeWrite):
self.supported_by_fnapi_runner = False
if isinstance(transform, beam.ParDo):
dofn = transform.dofn
# The FnApiRunner does not support execution of CombineFns with
# deferred side inputs.
if isinstance(dofn, CombineValuesDoFn):
args, kwargs = transform.raw_side_inputs
args_to_check = itertools.chain(args, kwargs.values())
if any(isinstance(arg, ArgumentPlaceholder)
for arg in args_to_check):
self.supported_by_fnapi_runner = False
if userstate.is_stateful_dofn(dofn):
_, timer_specs = userstate.get_dofn_specs(dofn)
for timer in timer_specs:
if timer.time_domain == TimeDomain.REAL_TIME:
self.supported_by_fnapi_runner = False
# Check whether all transforms used in the pipeline are supported by the
# FnApiRunner, and the pipeline was not meant to be run as streaming.
if _FnApiRunnerSupportVisitor().accept(pipeline):
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.runners.portability.fn_api_runner import fn_runner
from apache_beam.runners.portability.portable_runner import JobServiceHandle
all_options = options.get_all_options()
encoded_options = JobServiceHandle.encode_pipeline_options(all_options)
provision_info = fn_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(
pipeline_options=encoded_options))
runner = fn_runner.FnApiRunner(provision_info=provision_info)
else:
runner = BundleBasedDirectRunner()
return runner.run_pipeline(pipeline, options)
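# Illustrative note (editorial addition, not in the original module): callers
# normally reach this class through the DirectRunner alias defined near the
# bottom of this file, e.g.
#
#   with beam.Pipeline(runner='DirectRunner') as p:
#       _ = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * 2)
#
# and the visitor above decides per pipeline whether the FnApiRunner or the
# BundleBasedDirectRunner executes it.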
# Type variables.
K = typing.TypeVar('K')
V = typing.TypeVar('V')
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupByKeyOnly(PTransform):
"""A group by key transform, ignoring windows."""
def infer_output_type(self, input_type):
key_type, value_type = trivial_inference.key_value_types(input_type)
return typehints.KV[key_type, typehints.Iterable[value_type]]
def expand(self, pcoll):
self._check_pcollection(pcoll)
return PCollection.from_(pcoll)
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupAlsoByWindow(ParDo):
"""The GroupAlsoByWindow transform."""
def __init__(self, windowing):
super(_GroupAlsoByWindow, self).__init__(_GroupAlsoByWindowDoFn(windowing))
self.windowing = windowing
def expand(self, pcoll):
self._check_pcollection(pcoll)
return PCollection.from_(pcoll)
class _GroupAlsoByWindowDoFn(DoFn):
# TODO(robertwb): Support combiner lifting.
def __init__(self, windowing):
super(_GroupAlsoByWindowDoFn, self).__init__()
self.windowing = windowing
def infer_output_type(self, input_type):
key_type, windowed_value_iter_type = trivial_inference.key_value_types(
input_type)
value_type = windowed_value_iter_type.inner_type.inner_type
return typehints.Iterable[typehints.KV[key_type,
typehints.Iterable[value_type]]]
def start_bundle(self):
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.trigger import create_trigger_driver
# pylint: enable=wrong-import-order, wrong-import-position
self.driver = create_trigger_driver(self.windowing, True)
def process(self, element):
k, vs = element
return self.driver.process_entire_key(k, vs)
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _StreamingGroupByKeyOnly(_GroupByKeyOnly):
"""Streaming GroupByKeyOnly placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gbko:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, unused_context):
return _StreamingGroupByKeyOnly.urn, None
@staticmethod
@PTransform.register_urn(urn, None)
def from_runner_api_parameter(
unused_ptransform, unused_payload, unused_context):
return _StreamingGroupByKeyOnly()
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _StreamingGroupAlsoByWindow(_GroupAlsoByWindow):
"""Streaming GroupAlsoByWindow placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gabw:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, context):
return (
_StreamingGroupAlsoByWindow.urn,
wrappers_pb2.BytesValue(
value=context.windowing_strategies.get_id(self.windowing)))
@staticmethod
@PTransform.register_urn(urn, wrappers_pb2.BytesValue)
def from_runner_api_parameter(unused_ptransform, payload, context):
return _StreamingGroupAlsoByWindow(
context.windowing_strategies.get_by_id(payload.value))
@typehints.with_input_types(typing.Tuple[K, typing.Iterable[V]])
@typehints.with_output_types(typing.Tuple[K, typing.Iterable[V]])
class _GroupByKey(PTransform):
"""The DirectRunner GroupByKey implementation."""
def expand(self, pcoll):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.coders import typecoders
input_type = pcoll.element_type
if input_type is not None:
# Initialize type-hints used below to enforce type-checking and to
# pass downstream to further PTransforms.
key_type, value_type = trivial_inference.key_value_types(input_type)
# Enforce the input to a GBK has a KV element type.
pcoll.element_type = typehints.typehints.coerce_to_kv_type(
pcoll.element_type)
typecoders.registry.verify_deterministic(
typecoders.registry.get_coder(key_type),
'GroupByKey operation "%s"' % self.label)
reify_output_type = typehints.KV[
key_type, typehints.WindowedValue[value_type]] # type: ignore[misc]
gbk_input_type = (
typehints.KV[
key_type,
typehints.Iterable[typehints.WindowedValue[ # type: ignore[misc]
value_type]]])
gbk_output_type = typehints.KV[key_type, typehints.Iterable[value_type]]
# pylint: disable=bad-continuation
return (
pcoll
| 'ReifyWindows' >> (
ParDo(beam.GroupByKey.ReifyWindows()).with_output_types(
reify_output_type))
| 'GroupByKey' >> (
_GroupByKeyOnly().with_input_types(
reify_output_type).with_output_types(gbk_input_type))
| (
'GroupByWindow' >>
_GroupAlsoByWindow(pcoll.windowing).with_input_types(
gbk_input_type).with_output_types(gbk_output_type)))
else:
# The input_type is None, run the default
return (
pcoll
| 'ReifyWindows' >> ParDo(beam.GroupByKey.ReifyWindows())
| 'GroupByKey' >> _GroupByKeyOnly()
| 'GroupByWindow' >> _GroupAlsoByWindow(pcoll.windowing))
def _get_transform_overrides(pipeline_options):
# A list of PTransformOverride objects to be applied before running a pipeline
# using DirectRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal use only; no backwards-compatibility guarantees.
# Importing following locally to avoid a circular dependency.
from apache_beam.pipeline import PTransformOverride
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
from apache_beam.runners.direct.sdf_direct_runner import ProcessKeyedElementsViaKeyedWorkItemsOverride
from apache_beam.runners.direct.sdf_direct_runner import SplittableParDoOverride
class CombinePerKeyOverride(PTransformOverride):
def matches(self, applied_ptransform):
if isinstance(applied_ptransform.transform, CombinePerKey):
return applied_ptransform.inputs[0].windowing.is_default()
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
# TODO: Move imports to top. Pipeline <-> Runner dependency cause problems
# with resolving imports when they are at top.
# pylint: disable=wrong-import-position
try:
transform = applied_ptransform.transform
return LiftedCombinePerKey(
transform.fn, transform.args, transform.kwargs)
except NotImplementedError:
return transform
class StreamingGroupByKeyOverride(PTransformOverride):
def matches(self, applied_ptransform):
# Note: we match the exact class, since we replace it with a subclass.
return applied_ptransform.transform.__class__ == _GroupByKeyOnly
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
# Use specialized streaming implementation.
transform = _StreamingGroupByKeyOnly()
return transform
class StreamingGroupAlsoByWindowOverride(PTransformOverride):
def matches(self, applied_ptransform):
# Note: we match the exact class, since we replace it with a subclass.
transform = applied_ptransform.transform
return (
isinstance(applied_ptransform.transform, ParDo) and
isinstance(transform.dofn, _GroupAlsoByWindowDoFn) and
transform.__class__ != _StreamingGroupAlsoByWindow)
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
# Use specialized streaming implementation.
transform = _StreamingGroupAlsoByWindow(
applied_ptransform.transform.dofn.windowing)
return transform
class TestStreamOverride(PTransformOverride):
def matches(self, applied_ptransform):
from apache_beam.testing.test_stream import TestStream
self.applied_ptransform = applied_ptransform
return isinstance(applied_ptransform.transform, TestStream)
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
from apache_beam.runners.direct.test_stream_impl import _ExpandableTestStream
return _ExpandableTestStream(applied_ptransform.transform)
class GroupByKeyPTransformOverride(PTransformOverride):
"""A ``PTransformOverride`` for ``GroupByKey``.
This replaces the Beam implementation as a primitive.
"""
def matches(self, applied_ptransform):
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms.core import GroupByKey
return isinstance(applied_ptransform.transform, GroupByKey)
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
return _GroupByKey()
overrides = [
# This needs to be the first and the last override. Other overrides depend
# on the GroupByKey implementation to be composed of _GroupByKeyOnly and
# _GroupAlsoByWindow.
GroupByKeyPTransformOverride(),
SplittableParDoOverride(),
ProcessKeyedElementsViaKeyedWorkItemsOverride(),
CombinePerKeyOverride(),
TestStreamOverride(),
]
# Add streaming overrides, if necessary.
if pipeline_options.view_as(StandardOptions).streaming:
overrides.append(StreamingGroupByKeyOverride())
overrides.append(StreamingGroupAlsoByWindowOverride())
# Add PubSub overrides, if PubSub is available.
try:
from apache_beam.io.gcp import pubsub as unused_pubsub
overrides += _get_pubsub_transform_overrides(pipeline_options)
except ImportError:
pass
# This also needs to be last because other transforms apply GBKs which need to
# be translated into a DirectRunner-compatible transform.
overrides.append(GroupByKeyPTransformOverride())
return overrides
class _DirectReadFromPubSub(PTransform):
def __init__(self, source):
self._source = source
def _infer_output_coder(
self, unused_input_type=None, unused_input_coder=None):
# type: (...) -> typing.Optional[coders.Coder]
return coders.BytesCoder()
def get_windowing(self, unused_inputs):
return beam.Windowing(beam.window.GlobalWindows())
def expand(self, pvalue):
# This is handled as a native transform.
return PCollection(self.pipeline, is_bounded=self._source.is_bounded())
class _DirectWriteToPubSubFn(DoFn):
BUFFER_SIZE_ELEMENTS = 100
FLUSH_TIMEOUT_SECS = BUFFER_SIZE_ELEMENTS * 0.5
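  # Editorial note (not in the original module): elements are buffered per
  # bundle and flushed either when BUFFER_SIZE_ELEMENTS is reached or at
  # finish_bundle(); each publish future then gets the remaining share of
  # FLUSH_TIMEOUT_SECS to resolve before result() times out.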
def __init__(self, transform):
self.project = transform.project
self.short_topic_name = transform.topic_name
self.id_label = transform.id_label
self.timestamp_attribute = transform.timestamp_attribute
self.with_attributes = transform.with_attributes
# TODO(BEAM-4275): Add support for id_label and timestamp_attribute.
if transform.id_label:
raise NotImplementedError(
'DirectRunner: id_label is not supported for '
'PubSub writes')
if transform.timestamp_attribute:
raise NotImplementedError(
'DirectRunner: timestamp_attribute is not '
'supported for PubSub writes')
def start_bundle(self):
self._buffer = []
def process(self, elem):
self._buffer.append(elem)
if len(self._buffer) >= self.BUFFER_SIZE_ELEMENTS:
self._flush()
def finish_bundle(self):
self._flush()
def _flush(self):
from google.cloud import pubsub
pub_client = pubsub.PublisherClient()
topic = pub_client.topic_path(self.project, self.short_topic_name)
if self.with_attributes:
futures = [
pub_client.publish(topic, elem.data, **elem.attributes)
for elem in self._buffer
]
else:
futures = [pub_client.publish(topic, elem) for elem in self._buffer]
timer_start = time.time()
for future in futures:
remaining = self.FLUSH_TIMEOUT_SECS - (time.time() - timer_start)
future.result(remaining)
self._buffer = []
def _get_pubsub_transform_overrides(pipeline_options):
from apache_beam.io.gcp import pubsub as beam_pubsub
from apache_beam.pipeline import PTransformOverride
class ReadFromPubSubOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(
applied_ptransform.transform, beam_pubsub.ReadFromPubSub)
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
if not pipeline_options.view_as(StandardOptions).streaming:
raise Exception(
'PubSub I/O is only available in streaming mode '
'(use the --streaming flag).')
return _DirectReadFromPubSub(applied_ptransform.transform._source)
class WriteToPubSubOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(applied_ptransform.transform, beam_pubsub.WriteToPubSub)
def get_replacement_transform_for_applied_ptransform(
self, applied_ptransform):
if not pipeline_options.view_as(StandardOptions).streaming:
raise Exception(
'PubSub I/O is only available in streaming mode '
'(use the --streaming flag).')
return beam.ParDo(_DirectWriteToPubSubFn(applied_ptransform.transform))
return [ReadFromPubSubOverride(), WriteToPubSubOverride()]
class BundleBasedDirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
@staticmethod
def is_fnapi_compatible():
return False
def run_pipeline(self, pipeline, options):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
# TODO: Move imports to top. Pipeline <-> Runner dependency cause problems
# with resolving imports when they are at top.
# pylint: disable=wrong-import-position
from apache_beam.pipeline import PipelineVisitor
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
from apache_beam.testing.test_stream import TestStream
# If the TestStream I/O is used, use a mock test clock.
class TestStreamUsageVisitor(PipelineVisitor):
"""Visitor determining whether a Pipeline uses a TestStream."""
def __init__(self):
self.uses_test_stream = False
def visit_transform(self, applied_ptransform):
if isinstance(applied_ptransform.transform, TestStream):
self.uses_test_stream = True
visitor = TestStreamUsageVisitor()
pipeline.visit(visitor)
clock = TestClock() if visitor.uses_test_stream else RealClock()
# Performing configured PTransform overrides.
pipeline.replace_all(_get_transform_overrides(options))
_LOGGER.info('Running pipeline with DirectRunner.')
self.consumer_tracking_visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.consumer_tracking_visitor)
evaluation_context = EvaluationContext(
options,
BundleFactory(
stacked=options.view_as(
DirectOptions).direct_runner_use_stacked_bundle),
self.consumer_tracking_visitor.root_transforms,
self.consumer_tracking_visitor.value_to_consumers,
self.consumer_tracking_visitor.step_names,
self.consumer_tracking_visitor.views,
clock)
executor = Executor(
self.consumer_tracking_visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# DirectRunner does not support injecting
# PipelineOptions values at runtime
RuntimeValueProvider.set_runtime_options({})
# Start the executor. This is a non-blocking call, it will start the
# execution in background threads and return.
executor.start(self.consumer_tracking_visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
return result
# Use the SwitchingDirectRunner as the default.
DirectRunner = SwitchingDirectRunner
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def __del__(self):
if self._state == PipelineState.RUNNING:
_LOGGER.warning(
'The DirectPipelineResult is being garbage-collected while the '
'DirectRunner is still running the corresponding pipeline. This may '
'lead to incomplete execution of the pipeline if the main thread '
'exits before pipeline completion. Consider using '
'result.wait_until_finish() to wait for completion of pipeline '
'execution.')
def wait_until_finish(self, duration=None):
if not PipelineState.is_terminal(self.state):
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
def cancel(self):
"""Shuts down pipeline workers.
For testing use only. Does not properly wait for pipeline workers to shut
down.
"""
self._state = PipelineState.CANCELLING
self._executor.shutdown()
self._state = PipelineState.CANCELLED
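# Illustrative usage sketch (editorial addition, not in the original module).
# A DirectPipelineResult comes back from Pipeline.run(); callers typically
# block on it rather than constructing one directly:
#
#   p = beam.Pipeline(runner=DirectRunner())
#   _ = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * 2)
#   result = p.run()
#   result.wait_until_finish()   # returns PipelineState.DONE on success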
|
robertwb/incubator-beam
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
Python
|
apache-2.0
| 24,082
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import copy
"""
This Script is a duplicate of Packs/Campaign/Scripts/GetCampaignIncidentsInfo with the only change of the context field
the data is taken from. The reason is that dynamic section in layout cannot use arguments in the scripts they use.
"""
DEFAULT_HEADERS = ['id', 'name', 'emailfrom', 'recipients', 'severity', 'status', 'created']
KEYS_FETCHED_BY_QUERY = ['status', 'severity']
NO_CAMPAIGN_INCIDENTS_MSG = 'There are no campaign incidents in the context'
LINKABLE_ID_FORMAT = '[{incident_id}](#/Details/{incident_id})'
STATUS_DICT = {
0: "Pending",
1: "Active",
2: "Closed",
3: "Archive",
}
DEFAULT_CUSTOM_FIELDS = {
'campaignclosenotes': 'Notes explaining why the incident was closed',
'campaignemailsubject': 'Campaign detected',
'campaignemailbody': 'Fill here message for the recipients',
'selectcampaignincidents': ['All']
}
SEVERITIES = {
4: 'Critical',
3: 'High',
2: 'Medium',
1: 'Low',
0.5: 'Info',
0: 'Unknown'
}
def update_incident_with_required_keys(incidents, required_keys):
"""
Update the given incident dict (from context) with values retrieved by GetIncidentsByQuery command
:type incidents: ``list``
:param incidents: campaign incidents from the context
:type required_keys: ``list``
    :param required_keys: the keys that need to be updated
"""
ids = [str(incident['id']) for incident in incidents]
res = demisto.executeCommand('GetIncidentsByQuery', {
'query': "id:({})".format(' '.join(ids))
})
if isError(res):
return_error(f'Error occurred while trying to get incidents by query: {get_error(res)}')
incidents_from_query = json.loads(res[0]['Contents'])
id_to_updated_incident_map = {incident['id']: incident for incident in incidents_from_query}
for incident in incidents:
updated_incident = id_to_updated_incident_map[incident['id']]
for key in required_keys:
incident[key] = updated_incident.get(key)
def convert_incident_to_hr(incident):
    """
    Return a human-readable copy of the given incident dict, converting some
    values, e.g. making the id linkable, mapping status and severity codes to
    their names, and rounding the similarity score.
    Note: the incident is deep-copied first, so the original dict is unchanged.
    :type incident: ``dict``
    :param incident: the incident to convert
    :rtype: ``dict``
    :return the converted incident
    """
converted_incident = copy.deepcopy(incident)
for key in converted_incident.keys():
if key == 'status':
converted_incident[key] = STATUS_DICT.get(converted_incident.get(key))
if key == 'id':
converted_incident[key] = LINKABLE_ID_FORMAT.format(incident_id=converted_incident.get(key))
if key == 'severity':
converted_incident[key] = SEVERITIES.get(converted_incident.get(key), '')
if key == 'similarity':
if str(converted_incident[key])[0] == '1':
converted_incident[key] = '1'
elif len(str(converted_incident[key])) > 4:
converted_incident[key] = str(round(converted_incident[key], 3))
converted_incident[key] = converted_incident[key][:-1] if len(converted_incident[key]) > 4 \
else converted_incident[key]
else:
converted_incident[key] = str(converted_incident[key])
converted_incident[key] = converted_incident.get(key.replace('_', ''))
return converted_incident
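# Illustrative example (editorial addition, not in the original script):
#
#   convert_incident_to_hr({'id': '42', 'status': 1, 'severity': 3})
#   -> {'id': '[42](#/Details/42)', 'status': 'Active', 'severity': 'High'}
#
# The input dict is left untouched because the function works on a deep copy.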
def get_campaign_incidents_from_context():
return demisto.get(demisto.context(), 'EmailCampaign.LowerSimilarityIncidents')
def get_incidents_info_md(incidents, fields_to_display=None):
"""
Get the campaign incidents relevant info in MD table
:type incidents: ``list``
:param incidents: the campaign incidents to collect the info from
:type fields_to_display: ``list``
:param fields_to_display: list of result headers
:rtype: ``str``
:return the MD table str
"""
if incidents:
if not fields_to_display:
headers = DEFAULT_HEADERS
else:
headers = fields_to_display
converted_incidents = [convert_incident_to_hr(incident) for incident in incidents]
return tableToMarkdown(
name='',
t=converted_incidents,
headerTransform=string_to_table_header,
headers=headers,
removeNull=True,
)
return None
def update_empty_fields():
    """
    Fill the campaign dynamic section's empty fields with default values so that they appear in the page
    """
incident = demisto.incidents()[0]
custom_fields = incident.get('customFields', {})
for field in DEFAULT_CUSTOM_FIELDS.keys():
if not custom_fields.get(field):
custom_fields[field] = DEFAULT_CUSTOM_FIELDS[field]
demisto.executeCommand('setIncident', {'id': incident['id'], 'customFields': custom_fields})
def main():
try:
incidents = get_campaign_incidents_from_context()
fields_to_display = demisto.get(demisto.context(), 'EmailCampaign.fieldsToDisplay')
if incidents:
update_incident_with_required_keys(incidents, KEYS_FETCHED_BY_QUERY)
update_empty_fields()
readable_output = get_incidents_info_md(incidents, fields_to_display)
else:
readable_output = NO_CAMPAIGN_INCIDENTS_MSG
return_results(CommandResults(readable_output=readable_output))
except Exception as err:
return_error(str(err))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
VirusTotal/content
|
Packs/Campaign/Scripts/GetCampaignLowSimilarityIncidentsInfo/GetCampaignLowSimilarityIncidentsInfo.py
|
Python
|
mit
| 5,766
|
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
import os
import pwd
import web
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.lib.virt.virt import KaresansuiVirtException, \
KaresansuiVirtConnection
from karesansui.lib.merge import MergeGuest
from karesansui.lib.utils import get_ifconfig_info, generate_mac_address, is_param, \
generate_uuid, string_from_uuid
from karesansui.gadget.guestby1disk import create_disk_job, validates_disk, \
create_storage_volume_dir, exec_disk_job
from karesansui.gadget.guestby1nic import create_nic_job, validates_nic
from karesansui.gadget.hostby1networkstorage import get_iscsi_cmd
from karesansui.db.access.machine import findbyguest1
from karesansui.db.access._2pysilhouette import save_job_collaboration
from karesansui.db.access.machine2jobgroup import new as m2j_new
from karesansui.db.model._2pysilhouette import JobGroup, Job
from pysilhouette.command import dict2command
from karesansui.lib.const import DISK_QEMU_FORMAT, DISK_NON_QEMU_FORMAT, \
STORAGE_VOLUME_PWD, DISK_USES, \
VIRT_COMMAND_DELETE_STORAGE_VOLUME, VIRT_COMMAND_CREATE_STORAGE_VOLUME
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_STARTROOT, CHECK_EXIST, CHECK_ISDIR
class GuestBy1Device(Rest):
@auth
def _GET(self, *param, **params):
(host_id, guest_id) = self.chk_guestby1(param)
if guest_id is None: return web.notfound()
bridge_prefix = {
"XEN":"xenbr",
"KVM":"br|bondbr",
#"KVM":"eth|bondbr",
}
model = findbyguest1(self.orm, guest_id)
# virt
self.kvc = KaresansuiVirtConnection()
try:
domname = self.kvc.uuid_to_domname(model.uniq_key)
if not domname:
return web.notfound()
virt = self.kvc.search_kvg_guests(domname)[0]
guest = MergeGuest(model, virt)
self.view.guest = guest
# Output .input
if self.is_mode_input() is True:
try:
VMType = guest.info["virt"].get_info()["VMType"].upper()
except:
VMType = "KVM"
self.view.VMType = VMType
# Network
phydev = []
phydev_regex = re.compile(r"%s" % bridge_prefix[VMType])
for dev,dev_info in get_ifconfig_info().iteritems():
try:
if phydev_regex.match(dev):
phydev.append(dev)
except:
pass
if len(phydev) == 0:
phydev.append("%s0" % bridge_prefix[VMType])
phydev.sort()
self.view.phydev = phydev # Physical device
self.view.virnet = sorted(self.kvc.list_active_network()) # Virtual device
self.view.mac_address = generate_mac_address() # new mac address
# Disk
inactive_pool = []
active_pool = self.kvc.list_active_storage_pool()
pools = inactive_pool + active_pool
pools.sort()
if not pools:
                    return web.badrequest('No active storage pool was found.')
pools_info = {}
pools_vols_info = {}
pools_iscsi_blocks = {}
already_vols = []
guests = []
guests += self.kvc.list_inactive_guest()
guests += self.kvc.list_active_guest()
for guest in guests:
already_vol = self.kvc.get_storage_volume_bydomain(domain=guest,
image_type=None,
attr='path')
if already_vol:
already_vols += already_vol.keys()
for pool in pools:
pool_obj = self.kvc.search_kvn_storage_pools(pool)[0]
if pool_obj.is_active() is True:
pools_info[pool] = pool_obj.get_info()
blocks = None
if pools_info[pool]['type'] == 'iscsi':
blocks = self.kvc.get_storage_volume_iscsi_block_bypool(pool)
if blocks:
pools_iscsi_blocks[pool] = []
vols_obj = pool_obj.search_kvn_storage_volumes(self.kvc)
vols_info = {}
for vol_obj in vols_obj:
vol_name = vol_obj.get_storage_volume_name()
vols_info[vol_name] = vol_obj.get_info()
if blocks:
if vol_name in blocks and vol_name not in already_vols:
pools_iscsi_blocks[pool].append(vol_obj.get_info())
pools_vols_info[pool] = vols_info
self.view.pools = pools
self.view.pools_info = pools_info
self.view.pools_vols_info = pools_vols_info
self.view.pools_iscsi_blocks = pools_iscsi_blocks
if VMType == "KVM":
self.view.DISK_FORMATS = DISK_QEMU_FORMAT
else:
self.view.DISK_FORMATS = DISK_NON_QEMU_FORMAT
self.view.bus_types = self.kvc.bus_types
else: # .part
self.view.ifinfo = virt.get_interface_info() # interface info
self.view.disk_info = virt.get_disk_info() # Disk info
finally:
self.kvc.close()
return True
@auth
def _POST(self, *param, **params):
(host_id, guest_id) = self.chk_guestby1(param)
if guest_id is None: return web.notfound()
model = findbyguest1(self.orm, guest_id)
# virt
kvc = KaresansuiVirtConnection()
try:
domname = kvc.uuid_to_domname(model.uniq_key)
if not domname: return web.conflict(web.ctx.path)
virt = kvc.search_kvg_guests(domname)[0]
nic_info = virt.get_interface_info()
# -- Nic
if self.input.device_type == "nic":
if not validates_nic(self):
return web.badrequest(self.view.alert)
f_chk = True
for x in nic_info:
if x['mac']['address'] == self.input.mac_address:
f_chk = False
break
if f_chk is False:
return web.badrequest(_('Specified MAC address is already defined.'))
mac = self.input.mac_address
bridge = None
network = None
if self.input.nic_type == "phydev":
bridge = self.input.phydev
elif self.input.nic_type == "virnet":
network = self.input.virnet
self.logger.debug('spinning off create_nic_job dom=%s, mac=%s, bridge=%s, network=%s' \
% (domname, mac, bridge, network))
                create_nic_job(self, model, domname, mac, bridge, network)
return web.accepted()
# -- Disk
elif self.input.device_type == "disk":
if not validates_disk(self):
return web.badrequest(self.view.alert)
volume_job = None
order = 0
if self.input.pool_type == "dir" or self.input.pool_type == "fs": # create(dir)
disk_type = 'file'
pool_name = self.input.pool_dir
volume_name = string_from_uuid(generate_uuid())
volume_job = create_storage_volume_dir(self,
model,
domname,
volume_name,
self.input.pool_dir,
self.input.disk_format,
self.input.disk_size,
self.input.disk_size,
'M',
order)
order += 1
elif self.input.pool_type == "block": # create(iscsi block)
disk_type = 'iscsi'
(iscsi_pool, iscsi_volume) = self.input.pool_dir.split("/", 2)
pool_name = iscsi_pool
volume_name = iscsi_volume
else:
return badrequest(_("No storage type specified."))
# add disk
disk_job = create_disk_job(self,
guest=model,
domain_name=domname,
pool=pool_name,
volume=volume_name,
bus=self.input.bus_type,
format=self.input.disk_format,
type=disk_type,
order=order)
order += 1
if exec_disk_job(obj=self,
guest=model,
disk_job=disk_job,
volume_job=volume_job,
order=order
) is True:
return web.accepted()
else:
return False
else: # Not Found
return False
finally:
kvc.close()
urls = (
'/host/(\d+)/guest/(\d+)/device/?(\.part|\.html)?$', GuestBy1Device,
)
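# Editorial note (not in the original file): the tuple above follows web.py's
# URL-mapping convention, so e.g. a GET to /host/1/guest/2/device/ dispatches
# to GuestBy1Device._GET with the regex groups ('1', '2', None) as *param.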
|
karesansui/karesansui
|
karesansui/gadget/guestby1device.py
|
Python
|
mit
| 11,405
|
# -*- coding: utf-8 -*-
"""
@copyright Copyright (c) 2013 Submit Consulting
@author Angel Sullon (@asullom)
@package utils
Descripcion: Clases para controlar la seguridad de la información en la nube
"""
from apps.utils.messages import Message
import datetime
import random
import hashlib
from array import *
from django.shortcuts import redirect
#import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
from django.contrib.auth.models import User, Group, Permission
from django.db.models import Q
from django.http import HttpResponse
class DataAccessToken:
"""
Clase que permite almacenar y recuperar los permisos a datos de las empresas solicitados por los usuarios.
"""
@staticmethod
def set_association_id(request, association_id):
request.session['association_id'] = association_id
@staticmethod
def get_association_id(session):
return session.get('association_id', False)
@staticmethod
def set_enterprise_id(request, enterprise_id):
request.session['enterprise_id'] = enterprise_id
@staticmethod
def get_enterprise_id(session):
return session.get('enterprise_id', False)
@staticmethod
def set_headquar_id(request, headquar_id):
request.session['headquar_id'] = headquar_id
@staticmethod
def get_headquar_id(session):
return session.get('headquar_id', False)
@staticmethod
    def set_grupo_id_list(request, grupo_id_list):
request.session['grupo_id_list'] = grupo_id_list
@staticmethod
def get_grupo_id_list(session):
return session.get('grupo_id_list', False)
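# Illustrative usage sketch (editorial addition, not in the original file):
#
#   DataAccessToken.set_enterprise_id(request, 7)
#   DataAccessToken.get_enterprise_id(request.session)  # -> 7, or False if unset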
class SecurityKey:
"""
Clase que permite crear llave de seguridad en las url.
"""
TEXT_KEY = 'lyHyRajh987r.P~CFCcJ[AvFKdz|86'
    # Method for generating security keys
@staticmethod
def get_key(id, action_name):
"""
Genera una llave de seguridad válida durante todo el día %Y-%m-%d
Entrada::
id=1
action_name="user_upd"
Salida::
1.dfad09debee34f8e85fccc5adaa2dadb
"""
key = "%s%s" % (SecurityKey.TEXT_KEY, datetime.datetime.now().strftime('%Y-%m-%d'))
m = hashlib.md5("%s%s%s" % (id, key, action_name))
key = m.hexdigest()
return u"%s.%s" % (id, key)
    # Method for checking whether a key is valid
@staticmethod
def is_valid_key(request, key_value, action_name):
"""
Genera una llave de seguridad válida durante todo el día %Y-%m-%d
Entrada::
key_value=1.dfad09debee34f8e85fccc5adaa2dadb
action_name="user_upd"
Salida::
1
"""
key = key_value.split('.')
_id = key[0]
valid_key = SecurityKey.get_key(_id, action_name)
        valid = (valid_key == key_value)
if not valid:
# raise Exception(("Acceso denegado. La llave de seguridad es incorrecta."))
Message.error(request, ('Acceso denegado. La llave de seguridad es incorrecta.'))
return False
# print 'key_value(%s) = valid_key(%s)' % (key_value, valid_key)
# Message.info(request,('key_value(%s) = valid_key(%s)' % (key_value, valid_key)))
return _id
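    # Illustrative round trip (editorial addition, not in the original file):
    #
    #   key = SecurityKey.get_key(1, 'user_upd')             # '1.dfad09de...'
    #   SecurityKey.is_valid_key(request, key, 'user_upd')   # -> '1'
    #
    # Keys embed the current date, so they expire at midnight.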
class Redirect:
"""
Clase que permite re-dirigir a un controller, cuaya solicitud se haya realizado con ajax o no
Antes::
if request.is_ajax():
request.path="/params/locality/index/" #/app/controller_path/action/$params
return locality_index(request)
else:
return redirect("/params/locality/index/")
Ahora solo use (Example)::
return Redirect.to(request, "/sad/user/index/")
return Redirect.to_action(request, "index")
"""
@staticmethod
def to(request, route, params=None):
"""
route_list[0] = app
route_list[1] = controller
route_list[2] = action
"""
route = route.strip("/")
route_list = route.split("/")
app_name = route_list[0]
controller_name = ""
action_name = ""
if len(route_list) > 1:
controller_name = route_list[1]
else:
raise Exception(("Route no tiene controller"))
if len(route_list) > 2:
action_name = route_list[2]
app = ("apps.%s.views") % app_name
path = "/%s/%s/" % (app_name, controller_name)
func = "%s" % (controller_name)
if action_name:
path = "/%s/%s/%s/" % (app_name, controller_name, action_name)
func = "%s_%s" % (controller_name, action_name)
if request.is_ajax():
mod = __import__(app, fromlist=[func])
methodToCall = getattr(mod, func)
# Message.error(request, "ajax %s"%path)
request.path = path # /app/controller_path/action/$params
return methodToCall(request)
else:
# Message.error(request, "noajax %s"%path)
return redirect(path)
@staticmethod
def to_action(request, action_name, params=None):
"""
route_list[0] = app
route_list[1] = controller
route_list[2] = action
"""
route = request.path
route = route.strip("/")
route_list = route.split("/")
app_name = route_list[0]
controller_name = ""
# action_name=""
if len(route_list) > 1:
controller_name = route_list[1]
else:
raise Exception(("Route no tiene controller"))
# if len(route_list) > 2:
# action_name = route_list[2]
app = ("apps.%s.views") % app_name
path = "/%s/%s/" % (app_name, controller_name)
func = "%s" % (controller_name)
if action_name:
path = "/%s/%s/%s/" % (app_name, controller_name, action_name)
func = "%s_%s" % (controller_name, action_name)
# Message.error(request, "path= %s"%path)
# Message.error(request, "func= %s"%func)
if request.is_ajax():
mod = __import__(app, fromlist=[func])
methodToCall = getattr(mod, func)
# Message.error(request, "ajax %s"%path)
request.path = path # /app/controller_path/action/$params
return methodToCall(request)
else:
# Message.error(request, "noajax %s"%path)
return redirect(path)
|
submitconsulting/backenddj
|
apps/utils/security.py
|
Python
|
bsd-3-clause
| 5,806
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Audio tools for recording and analyzing audio.
The audio tools provided here are mainly to:
- record playing audio.
- remove silence from beginning and end of audio file.
- compare audio files using PESQ tool.
The tools are supported on Windows and Linux.
"""
import commands
import ctypes
import logging
import os
import re
import subprocess
import sys
import threading
import time
import pyauto_media
import pyauto
_TOOLS_PATH = os.path.abspath(os.path.join(pyauto.PyUITest.DataDir(),
'pyauto_private', 'media', 'tools'))
WINDOWS = 'win32' in sys.platform
if WINDOWS:
_PESQ_PATH = os.path.join(_TOOLS_PATH, 'pesq.exe')
_SOX_PATH = os.path.join(_TOOLS_PATH, 'sox.exe')
_AUDIO_RECORDER = r'SoundRecorder.exe'
else:
_PESQ_PATH = os.path.join(_TOOLS_PATH, 'pesq')
_SOX_PATH = commands.getoutput('which sox')
_AUDIO_RECORDER = commands.getoutput('which arecord')
_PACMD_PATH = commands.getoutput('which pacmd')
class AudioRecorderThread(threading.Thread):
"""A thread that records audio out of the default audio output."""
def __init__(self, duration, output_file, record_mono=False):
threading.Thread.__init__(self)
self.error = ''
self._duration = duration
self._output_file = output_file
self._record_mono = record_mono
def run(self):
"""Starts audio recording."""
if WINDOWS:
if self._record_mono:
raise Exception("Mono recording not supported on Windows yet!")
duration = time.strftime('%H:%M:%S', time.gmtime(self._duration))
cmd = [_AUDIO_RECORDER, '/FILE', self._output_file, '/DURATION',
duration]
# This is needed to run SoundRecorder.exe on Win-64 using Python-32 bit.
ctypes.windll.kernel32.Wow64DisableWow64FsRedirection(
ctypes.byref(ctypes.c_long()))
else:
num_channels = 1 if self._record_mono else 2
cmd = [_AUDIO_RECORDER, '-d', self._duration, '-f', 'dat', '-c',
str(num_channels), self._output_file]
cmd = [str(s) for s in cmd]
logging.debug('Running command: %s', ' '.join(cmd))
returncode = subprocess.call(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if returncode != 0:
self.error = 'Failed to record audio.'
else:
logging.debug('Finished recording audio into %s.', self._output_file)
def RunPESQ(audio_file_ref, audio_file_test, sample_rate=16000):
"""Runs PESQ to compare audio test file to a reference audio file.
Args:
audio_file_ref: The reference audio file used by PESQ.
audio_file_test: The audio test file to compare.
sample_rate: Sample rate used by PESQ algorithm, possible values are only
8000 or 16000.
Returns:
A tuple of float values representing PESQ scores of the audio_file_ref and
audio_file_test consecutively.
"""
# Work around a bug in PESQ when the ref file path is > 128 chars. PESQ will
# compute an incorrect score then (!), and the relative path to the ref file
# should be a lot shorter than the absolute one.
audio_file_ref = os.path.relpath(audio_file_ref)
cmd = [_PESQ_PATH, '+%d' % sample_rate, audio_file_ref, audio_file_test]
logging.debug('Running command: %s', ' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
logging.error('Error running pesq: %s\n%s', output, error)
# Last line of PESQ output shows the results. Example:
# P.862 Prediction (Raw MOS, MOS-LQO): = 4.180 4.319
  result = re.search(r'Prediction.*= (\d{1}\.\d{3})\t(\d{1}\.\d{3})',
                     output)
if not result or len(result.groups()) != 2:
return None
return (float(result.group(1)), float(result.group(2)))
def RemoveSilence(input_audio_file, output_audio_file):
"""Removes silence from beginning and end of the input_audio_file.
Args:
input_audio_file: The audio file to remove silence from.
output_audio_file: The audio file to save the output audio.
"""
# SOX documentation for silence command: http://sox.sourceforge.net/sox.html
# To remove the silence from both beginning and end of the audio file, we call
# sox silence command twice: once on normal file and again on its reverse,
# then we reverse the final output.
# Silence parameters are (in sequence):
# ABOVE_PERIODS: The period for which silence occurs. Value 1 is used for
# silence at beginning of audio.
# DURATION: the amount of time in seconds that non-silence must be detected
# before sox stops trimming audio.
  # THRESHOLD: value used to indicate what sample value is treated as silence.
ABOVE_PERIODS = '1'
DURATION = '2'
THRESHOLD = '5%'
cmd = [_SOX_PATH, input_audio_file, output_audio_file, 'silence',
ABOVE_PERIODS, DURATION, THRESHOLD, 'reverse', 'silence',
ABOVE_PERIODS, DURATION, THRESHOLD, 'reverse']
logging.debug('Running command: %s', ' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
logging.error('Error removing silence from audio: %s\n%s', output, error)
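# Illustrative sketch (editorial addition, not in the original file); the
# file names are placeholders:
#
#   RemoveSilence('recorded.wav', 'trimmed.wav')
#   scores = RunPESQ('reference.wav', 'trimmed.wav', sample_rate=16000)
#   if scores:
#     raw_mos, mos_lqo = scores   # e.g. (4.180, 4.319) as in the sample output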
def ForceMicrophoneVolumeTo100Percent():
if WINDOWS:
logging.error('Volume forcing not implemented on Windows yet.')
else:
# The recording device id is machine-specific. We assume here it is called
# Monitor of render (which corresponds to the id render.monitor). You can
# list the available recording devices with pacmd list-sources.
RECORDING_DEVICE_ID = 'render.monitor'
HUNDRED_PERCENT_VOLUME = '65536'
cmd = [_PACMD_PATH, 'set-source-volume', RECORDING_DEVICE_ID,
HUNDRED_PERCENT_VOLUME]
logging.debug('Running command: %s', ' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
logging.error('Error forcing mic volume to 100%%: %s\n%s', output, error)
|
loopCM/chromium
|
chrome/test/functional/media/audio_tools.py
|
Python
|
bsd-3-clause
| 6,222
|
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import math
import shutil
# Import Broadband modules
import plot_srf
import bband_utils
from irikura_gen_srf_cfg import IrikuraGenSrfCfg
from install_cfg import InstallCfg
class IrikuraGenSrf(object):
"""
Implements Arben's gen_srf.csh script in Python
"""
def __init__(self, i_r_velmodel, i_r_srcfile,
o_r_srffile, i_vmodel_name, sim_id=0,
**kwargs):
self.sim_id = sim_id
self.r_velmodel = i_r_velmodel
self.r_srcfile = i_r_srcfile
self.r_srffile = o_r_srffile
self.vmodel_name = i_vmodel_name
self.r_srcfiles = []
# Get all src files that were passed to us
        if kwargs:
for idx in range(len(kwargs)):
self.r_srcfiles.append(kwargs['src%d' % (idx)])
else:
# Not a multisegment run, just use the single src file
self.r_srcfiles.append(i_r_srcfile)
def run(self):
"""
This function prepares the parameters for Irikura's gen_srf then calls it
"""
print("IrikuraGenSrf".center(80, '-'))
# Load configuration, set sim_id
install = InstallCfg.getInstance()
sim_id = self.sim_id
# Build directory paths
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
a_param_outdir = os.path.join(a_outdir, "param_files")
# Make sure the output and tmp directories exist
bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir,
a_logdir, a_param_outdir])
# Now, file paths
self.log = os.path.join(a_logdir, "%d.gen_srf.log" % (sim_id))
a_srcfiles = [os.path.join(a_indir,
srcfile) for srcfile in self.r_srcfiles]
# Read src file
cfg = IrikuraGenSrfCfg(a_srcfiles)
# Define location of input velocity model and output srf file
if cfg.num_srcfiles > 1:
a_srffile = os.path.join(a_tmpdir, self.r_srffile)
a_final_srffile = os.path.join(a_indir, self.r_srffile)
else:
a_srffile = os.path.join(a_indir, self.r_srffile)
a_velmod = os.path.join(install.A_IN_DATA_DIR, str(sim_id),
self.r_velmodel)
# Run in tmpdir subdir to isolate temp fortran files
# Save cwd, change back to it at the end
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
# Read parameters from the src(s) file(s)
# The following parameters should be common to all SRC files
# So we just read from the first one
simulation_seed = int(cfg.CFGDICT[0]['seed'])
dip = cfg.CFGDICT[0]['dip']
rake = cfg.CFGDICT[0]['rake']
dlen = cfg.CFGDICT[0]['dlen']
dwid = cfg.CFGDICT[0]['dwid']
lon_top_center = cfg.CFGDICT[0]['lon_top_center']
lat_top_center = cfg.CFGDICT[0]['lat_top_center']
depth_to_top = cfg.CFGDICT[0]['depth_to_top']
if cfg.num_srcfiles > 1:
fault_len = cfg.CFGDICT[0]['max_fault_length']
else:
fault_len = cfg.CFGDICT[0]['fault_length']
fault_width = cfg.CFGDICT[0]['fault_width']
# Average strike of all SRC files
strike = 0.0
for segment in range(cfg.num_srcfiles):
strike = strike + cfg.CFGDICT[segment]['strike']
strike = math.ceil(strike / cfg.num_srcfiles)
# Hypocenter (down_dip is common to all src files)
hypo_down_dip = cfg.CFGDICT[0]['hypo_down_dip']
if cfg.num_srcfiles > 1:
hypo_along_stk = 0.0
for segment in range(cfg.num_srcfiles):
current_fault_len = cfg.CFGDICT[segment]['fault_length']
current_hypo_along_stk = cfg.CFGDICT[segment]['hypo_along_stk']
if abs(current_hypo_along_stk) <= current_fault_len:
# Hypocenter in this segment!
hypo_along_stk = hypo_along_stk + (current_fault_len / 2.0) + current_hypo_along_stk
break
else:
# Not here yet, just add the total length of this segment
hypo_along_stk = hypo_along_stk + current_fault_len
# Now convert hypo_along_stk so that 0.0 is the middle of the fault
hypo_along_stk = hypo_along_stk - (fault_len / 2.0)
else:
hypo_along_stk = cfg.CFGDICT[0]['hypo_along_stk']
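        # Worked example (editorial note, not in the original code), assuming
        # segment 1 carries the 999.0 "not in this segment" convention used
        # when writing the segments file below: with two 30 km segments and
        # the hypocenter 5 km past the midpoint of segment 2, the loop above
        # accumulates 30 + (30/2 + 5) = 50 km; subtracting fault_len/2 = 30 km
        # then places the hypocenter 20 km right of the overall fault midpoint.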
#
# Run gen_srf code
#
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR, cfg.GENSRF),
self.log) +
"%s\n" % a_srffile +
"%f %f %f %f %f\n" %
(fault_len, fault_width,
strike, dip, rake) +
"%f %f %f\n" %
(lon_top_center, lat_top_center, depth_to_top) +
"%f %f\n" % (dlen, dwid) +
"%f %f %f %f\n" %
(hypo_along_stk, hypo_down_dip,
cfg.DENS, cfg.VS) +
"%f\n" % (cfg.DT) +
"%d\n" % (simulation_seed) +
"%s\n" % (a_velmod) +
"%f\n" % (cfg.VEL_RUP_FRAC) +
"END")
bband_utils.runprog(progstring)
if cfg.num_srcfiles > 1:
# Assign the slip from the planar fault to each segment's SRF file
a_segs_file = os.path.join(a_tmpdir, "segments.midpoint.txt")
# Write segments' file
seg_file = open(a_segs_file, 'w')
seg_file.write("segm lon lat depth fleng fwidth shypo zhypo strike dip rake\n")
seg_file.write("%d\n" % (cfg.num_srcfiles))
total_length = 0.0
for segment in range(cfg.num_srcfiles):
if abs(cfg.CFGDICT[segment]['hypo_along_stk']) <= cfg.CFGDICT[segment]['fault_length']:
hypo_along_stk = cfg.CFGDICT[segment]['hypo_along_stk']
hypo_down_dip = cfg.CFGDICT[segment]['hypo_down_dip']
else:
hypo_along_stk = 999.0
hypo_down_dip = 999.0
seg_file.write("seg%d %.6f %.6f %.1f %.1f %.1f %.1f %.1f %.1f %d %d %d\n" %
(segment + 1,
cfg.CFGDICT[segment]['lon_top_center'],
cfg.CFGDICT[segment]['lat_top_center'],
cfg.CFGDICT[segment]['depth_to_top'],
total_length,
(total_length + cfg.CFGDICT[segment]['fault_length']),
cfg.CFGDICT[segment]['fault_width'],
hypo_along_stk, hypo_down_dip,
cfg.CFGDICT[segment]['strike'],
cfg.CFGDICT[segment]['dip'],
cfg.CFGDICT[segment]['rake']))
total_length = total_length + cfg.CFGDICT[segment]['fault_length']
seg_file.close()
#
# Run gen_srf_segment code
#
for segment in range(cfg.num_srcfiles):
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR,
cfg.GENSRFSEGMENT), self.log) +
".\n" +
"%s\n" % (self.r_srffile) +
"./segments.midpoint.txt\n" +
"%d\n" % (segment + 1) +
"%f %f\n" % (dlen, dwid) +
"END")
# Run code
bband_utils.runprog(progstring)
#
# Now add the segments together
#
progstring = ("%s >> %s 2>&1 << END\n" %
(os.path.join(install.A_IRIKURA_BIN_DIR,
cfg.SUMSEG), self.log) +
".\n" +
"%s\n" % (self.r_srffile) +
"./segments.midpoint.txt\n" +
"%d\n" % (cfg.num_srcfiles) +
"%f %f\n" % (dlen, dwid) +
"END")
# Run code
bband_utils.runprog(progstring)
# Copy file to final location
progstring = "cp %s %s" % (os.path.join(a_tmpdir,
"all_seg.%s" %
(self.r_srffile)),
a_final_srffile)
bband_utils.runprog(progstring)
# Use copied file from now on
a_srffile = a_final_srffile
# Restore working directory
os.chdir(old_cwd)
#
# Move results to outputfile
#
progstring = "cp %s %s" % (a_srffile,
os.path.join(a_tmpdir, self.r_srffile))
bband_utils.runprog(progstring)
progstring = "cp %s %s" % (a_srffile,
os.path.join(a_outdir, self.r_srffile))
bband_utils.runprog(progstring)
shutil.copy2(os.path.join(a_tmpdir, "stress_drop.out"),
os.path.join(a_param_outdir,
"stress_drop.out"))
# Plot SRF
plot_srf.run(self.r_srffile, sim_id=self.sim_id)
print("IrikuraGenSrf Completed".center(80, '-'))
if __name__ == "__main__":
print("Testing Module: %s" % os.path.basename((sys.argv[0])))
ME = IrikuraGenSrf(sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], sim_id=int(sys.argv[5]))
ME.run()
|
SCECcode/BBP
|
bbp/comps/irikura_gen_srf.py
|
Python
|
apache-2.0
| 10,817
|
__author__ = 'Calle Svensson <calle.svensson@zeta-two.com>'
# Constants
BYTE_MAX = 256
INF = 1 << 63
__all__ = ['cryptanalysis', 'ciphers', 'conversions', 'mathtools', 'utility', 'BYTE_MAX', 'INF']
|
ZetaTwo/zetacrypto
|
zetacrypt/__init__.py
|
Python
|
mit
| 200
|
from alarm.models import UserProfile, Log, Alert, AlarmStateConfiguration
from rest_framework import viewsets
from serializers import UserSerializer, LogSerializer, AlertSerializer, AlarmStateConfigurationSerializer
""" ViewSets define the view behavior. """
class UserProfileViewSet(viewsets.ModelViewSet):
queryset = UserProfile.objects.all()
serializer_class = UserSerializer
class LogViewSet(viewsets.ModelViewSet):
queryset = Log.objects.all()
serializer_class = LogSerializer
class AlertViewSet(viewsets.ModelViewSet):
queryset = Alert.objects.all()
serializer_class = AlertSerializer
class AlarmStateConfigurationSet(viewsets.ModelViewSet):
queryset = AlarmStateConfiguration.objects.all()
serializer_class = AlarmStateConfigurationSerializer
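# A minimal wiring sketch (assumption: these ViewSets are registered with a
# DRF router elsewhere, e.g. in urls.py; the route prefixes are hypothetical):
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'users', UserProfileViewSet)
#   router.register(r'logs', LogViewSet)
#   router.register(r'alerts', AlertViewSet)
#   router.register(r'alarm-states', AlarmStateConfigurationSet)
#   # then include router.urls in the project's urlpatterns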
|
Silvian/alarm-service
|
api/views.py
|
Python
|
gpl-3.0
| 797
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0006_auto_20150901_1541'),
]
operations = [
migrations.AddField(
model_name='deal',
name='contacted_by',
field=models.IntegerField(blank=True, max_length=255, null=True, verbose_name='contacted us by', choices=[(0, 'Quote'), (1, 'Contact form'), (2, 'Phone'), (3, 'Web chat'), (4, 'E-mail'), (5, 'Other')]),
preserve_default=True,
),
migrations.AddField(
model_name='deal',
name='found_through',
field=models.IntegerField(blank=True, max_length=255, null=True, verbose_name='found us through', choices=[(0, 'Search engine'), (1, 'Social media'), (2, 'Talk with employee'), (3, 'Existing customer'), (4, 'Other')]),
preserve_default=True,
),
]
|
HelloLily/hellolily
|
lily/deals/migrations/0007_auto_20150902_1543.py
|
Python
|
agpl-3.0
| 972
|
import IMP
import IMP.test
import IMP.atom
def get_all_atoms(pdb):
atoms = {}
residues = IMP.atom.get_by_type(pdb, IMP.atom.RESIDUE_TYPE)
for ni, res in enumerate(residues):
resatoms = IMP.atom.get_by_type(res, IMP.atom.ATOM_TYPE)
for a in resatoms:
aid = '%d:' % (ni + 1) \
+ IMP.atom.Atom(a).get_atom_type().get_string()
atoms[aid] = a.get_particle()
return atoms
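# Illustrative lookup: keys are '<residue-index>:<atom-type>', so
# atoms['1:CA'] is the particle for the CA atom of the first residue.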
class Tests(IMP.test.TestCase):
"""Test CHARMM stereochemistry restraint"""
def setup_restraint(self):
m = IMP.Model()
pdb = IMP.atom.read_pdb(
self.get_input_file_name('charmm_type_test.pdb'), m)
ff = IMP.atom.get_heavy_atom_CHARMM_parameters()
topology = ff.create_topology(pdb)
topology.apply_default_patches()
topology.setup_hierarchy(pdb)
r = IMP.atom.CHARMMStereochemistryRestraint(pdb, topology)
return r, m, pdb
def assertContainsPair(self, pf, atoms, a1, a2):
self.assertEquals(pf.get_value([atoms[a1], atoms[a2]]),
True)
def test_score(self):
"""Test CHARMMStereochemistryRestraint::evaluate()"""
r, m, pdb = self.setup_restraint()
score = r.evaluate(False)
self.assertAlmostEqual(score, 2.90562, delta=0.02)
def test_pair_filter(self):
"""Test CHARMMStereochemistryRestraint pair filter"""
r, m, pdb = self.setup_restraint()
pf = r.get_pair_filter()
atoms = get_all_atoms(pdb)
# Bonds (1-2 pairs)
self.assertContainsPair(pf, atoms, '1:N', '1:CA')
self.assertContainsPair(pf, atoms, '1:C', '2:N')
# Angles (1-3 pairs)
self.assertContainsPair(pf, atoms, '1:CA', '1:OG')
# Dihedrals (1-4 pairs)
self.assertContainsPair(pf, atoms, '1:N', '1:OG')
def test_get_inputs(self):
"""Test CHARMMStereochemistryRestraint get_inputs()"""
r, m, pdb = self.setup_restraint()
ps = r.get_inputs()
self.assertEqual(len(ps), 190)
class SelectionTests(IMP.test.TestCase):
"""Test CHARMM stereochemistry restraint with seletion"""
def setup_restraint(self):
m = IMP.Model()
pdb = IMP.atom.read_pdb(
self.get_input_file_name('charmm_type_test.pdb'), m)
ff = IMP.atom.get_heavy_atom_CHARMM_parameters()
topology = ff.create_topology(pdb)
topology.apply_default_patches()
topology.setup_hierarchy(pdb)
ff.add_radii(pdb)
ff.add_well_depths(pdb)
sel = IMP.atom.Selection(pdb,residue_index=2)
r = IMP.atom.CHARMMStereochemistryRestraint(pdb, topology,
sel.get_selected_particles())
return r, m, pdb
def assertContainsPair(self, pf, atoms, a1, a2):
self.assertEquals(pf.get_value([atoms[a1], atoms[a2]]),
True)
    def assertDoesNotContainPair(self, pf, atoms, a1, a2):
self.assertEquals(pf.get_value([atoms[a1], atoms[a2]]),
False)
def test_limited_pair_filter(self):
"""Test CHARMMStereochemistryRestraint pair filter"""
r, m, pdb = self.setup_restraint()
pf = r.get_pair_filter()
atoms = get_all_atoms(pdb)
# Bonds (1-2 pairs)
self.assertContainsPair(pf, atoms, '1:N', '1:CA')
        self.assertDoesNotContainPair(pf, atoms, '1:C', '2:N')
# Angles (1-3 pairs)
self.assertContainsPair(pf, atoms, '1:CA', '1:OG')
# Dihedrals (1-4 pairs)
self.assertContainsPair(pf, atoms, '1:N', '1:OG')
def test_limited_get_inputs(self):
"""Test CHARMMStereochemistryRestraint get_inputs()"""
r, m, pdb = self.setup_restraint()
ps = r.get_inputs()
self.assertEqual(len(ps), 75)
if __name__ == '__main__':
IMP.test.main()
|
shanot/imp
|
modules/atom/test/test_charmm_stereochemistry.py
|
Python
|
gpl-3.0
| 3,890
|
##########################################################################
#
# Copyright (c) 2009-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import random
import os
import maya.cmds
import IECore
import IECoreMaya
class SplineParameterHandlerTest( IECoreMaya.TestCase ) :
class TestClassFloat( IECore.Parameterised ) :
def __init__( self ) :
IECore.Parameterised.__init__( self, "description" )
self.parameters().addParameter(
IECore.SplineffParameter(
name = "spline",
description = "description",
defaultValue = IECore.SplineffData(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
(
( 0, 1 ),
( 0, 1 ),
( 1, 0 ),
( 1, 0 ),
),
),
),
)
)
class TestClassColor( IECore.Parameterised ) :
def __init__( self ) :
IECore.Parameterised.__init__( self, "description" )
self.parameters().addParameter(
IECore.SplinefColor3fParameter(
name = "spline",
description = "description",
defaultValue = IECore.SplinefColor3fData(
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 1, 1, 1 ) ),
( 0, IECore.Color3f( 1, 1, 1 ) ),
( 1, IECore.Color3f( 0, 0, 0 ) ),
( 1, IECore.Color3f( 0, 0, 0 ) ),
),
),
),
)
)
def testRoundTripFloat( self ) :
node = maya.cmds.createNode( "ieParameterisedHolderNode" )
parameterised = SplineParameterHandlerTest.TestClassFloat()
fnPH = IECoreMaya.FnParameterisedHolder( node )
fnPH.setParameterised( parameterised )
random.seed( 199 )
numTests = 100
for i in range( 0, numTests ) :
numPoints = int( random.random() * 12 ) + 2
splinePoints = []
for j in range( 0, numPoints ) :
splinePoints.append( ( random.random(), random.random() ) )
splinePoints.sort()
splinePoints.insert( 0, splinePoints[0] )
splinePoints.append( splinePoints[-1] )
assert( len( splinePoints ) >= 4 )
splineData = IECore.SplineffData(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
splinePoints
)
)
parameterised.parameters()["spline"].setValue( splineData )
# Put the value to the node's attributes
fnPH.setNodeValue( parameterised.parameters()["spline"], False )
# Retrieve the value from the node's attributes
fnPH.setParameterisedValue( parameterised.parameters()["spline"] )
# The parameter value should not have changed
data = parameterised.parameters()["spline"].getValue()
self.assertEqual( len( data.value ), len( splineData.value ) )
for i in range( 0, len( data.value ) ) :
self.assertAlmostEqual( data.value.keys()[i], splineData.value.keys()[i] )
self.assertAlmostEqual( data.value.values()[i], splineData.value.values()[i] )
def testRoundTripColor( self ) :
node = maya.cmds.createNode( "ieParameterisedHolderNode" )
parameterised = SplineParameterHandlerTest.TestClassColor()
fnPH = IECoreMaya.FnParameterisedHolder( node )
fnPH.setParameterised( parameterised )
random.seed( 205 )
numTests = 10
for i in range( 0, numTests ) :
numPoints = int( random.random() * 12 ) + 2
splinePoints = []
for j in range( 0, numPoints ) :
splinePoints.append( ( random.random(), IECore.Color3f( random.random(), random.random(), random.random() ) ) )
splinePoints.sort()
splinePoints.insert( 0, splinePoints[0] )
splinePoints.append( splinePoints[-1] )
assert( len( splinePoints ) >= 4 )
splineData = IECore.SplinefColor3fData(
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
splinePoints
)
)
parameterised.parameters()["spline"].setValue( splineData )
# Put the value to the node's attributes
fnPH.setNodeValue( parameterised.parameters()["spline"], False )
# Retrieve the value from the node's attributes
fnPH.setParameterisedValue( parameterised.parameters()["spline"] )
# The parameter value should not have changed
data = parameterised.parameters()["spline"].getValue()
self.assertEqual( len( data.value ), len( splineData.value ) )
for i in range( 0, len( data.value ) ) :
self.assertAlmostEqual( data.value.keys()[i], splineData.value.keys()[i] )
c1 = data.value.values()[i]
c2 = splineData.value.values()[i]
v1 = IECore.V3f( c1[0], c1[1], c1[2] )
v2 = IECore.V3f( c2[0], c2[1], c2[2] )
self.assert_( ( v1 - v2 ).length() < 1.e-4 )
def testRoundTripAfterSerialisation( self ) :
# make a scene with an OpHolder holding an op with a spline parameter
fnOH = IECoreMaya.FnOpHolder.create( "test", "splineInput", 1 )
opNode = fnOH.fullPathName()
op = fnOH.getOp()
# save it
maya.cmds.file( rename = os.getcwd() + "/test/IECoreMaya/splineParameterHandlerTest.ma" )
sceneFileName = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# load it
maya.cmds.file( new=True, force=True )
maya.cmds.file( sceneFileName, force=True, open=True )
fnOH = IECoreMaya.FnOpHolder( opNode )
op = fnOH.getOp()
# stick a new value on
splineData = IECore.SplineffData(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(), (
( 0, 0.644737 ),
( 0, 0.644737 ),
( 0.257426, 0.0789474 ),
( 1, -0.3 ),
( 1, -0.3 )
)
)
)
op["spline"].setValue( splineData )
# convert the value to maya
fnOH.setNodeValue( op["spline"] )
# convert it back
fnOH.setParameterisedValue( op["spline"] )
# make sure it worked
splineData2 = op["spline"].getValue()
self.assertEqual( splineData, splineData2 )
# do it all again just for kicks
op["spline"].setValue( splineData )
fnOH.setNodeValue( op["spline"] )
fnOH.setParameterisedValue( op["spline"] )
splineData2 = op["spline"].getValue()
self.assertEqual( splineData, splineData2 )
def testSparseEntries( self ) :
# load a scene where we have a spline parameter with sparse entries.
maya.cmds.file( os.getcwd() + "/test/IECoreMaya/scenes/splineWithSparseEntries.ma", force=True, open=True )
fnOH = IECoreMaya.FnOpHolder( "test" )
op = fnOH.getOp()
# stick a new value on
splineData = IECore.SplineffData(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(), (
( 0, 0.644737 ),
( 0, 0.644737 ),
( 0.257426, 0.0789474 ),
( 1, -0.3 ),
( 1, -0.3 )
)
)
)
op["spline"].setValue( splineData )
# convert the value to maya
fnOH.setNodeValue( op["spline"] )
# convert it back
fnOH.setParameterisedValue( op["spline"] )
# make sure it worked
splineData2 = op["spline"].getValue()
self.assertEqual( splineData, splineData2 )
# do it all again just for kicks
op["spline"].setValue( splineData )
fnOH.setNodeValue( op["spline"] )
fnOH.setParameterisedValue( op["spline"] )
splineData2 = op["spline"].getValue()
self.assertEqual( splineData, splineData2 )
def testAddColorSplineToReferencedNode( self ) :
# make a scene with an empty op holder
######################################
maya.cmds.createNode( "ieOpHolderNode" )
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "opHolderReference.ma" ) )
referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# reference it in and add an op with a color spline
###################################################
maya.cmds.file( new = True, force = True )
maya.cmds.file( referenceScene, reference = True, namespace = "ns1" )
fnOH = IECoreMaya.FnOpHolder( "ns1:ieOpHolderNode1" )
fnOH.setOp( "colorSplineInput", 1 )
fnOH.setParameterisedValues()
self.assertEqual(
fnOH.getOp()["spline"].getValue().value,
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 1 ) ),
( 0, IECore.Color3f( 1 ) ),
( 1, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 0 ) ),
),
)
)
# save the scene, and reload it. check that we've worked
# around another wonderful maya referencing bug
########################################################
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "opHolderReferencer.ma" ) )
referencerScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
maya.cmds.file( new = True, force = True )
maya.cmds.file( referencerScene, force = True, open = True )
fnOH = IECoreMaya.FnOpHolder( "ns1:ieOpHolderNode1" )
fnOH.setParameterisedValues()
self.assertEqual(
fnOH.getOp()["spline"].getValue().value,
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 1 ) ),
( 0, IECore.Color3f( 1 ) ),
( 1, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 0 ) ),
),
)
)
def testAddFloatSplineToReferencedNode( self ) :
# make a scene with an empty op holder
######################################
maya.cmds.createNode( "ieOpHolderNode" )
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "opHolderReference.ma" ) )
referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# reference it in and add an op with a color spline
###################################################
maya.cmds.file( new = True, force = True )
maya.cmds.file( referenceScene, reference = True, namespace = "ns1" )
fnOH = IECoreMaya.FnOpHolder( "ns1:ieOpHolderNode1" )
fnOH.setOp( "splineInput", 1 )
fnOH.setParameterisedValues()
self.assertEqual(
fnOH.getOp()["spline"].getValue().value,
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
(
( 0, 1 ),
( 0, 1 ),
( 1, 0 ),
( 1, 0 ),
),
)
)
# save the scene, and reload it. check that we've worked
# around another wonderful maya referencing bug
########################################################
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "opHolderReferencer.ma" ) )
referencerScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
maya.cmds.file( new = True, force = True )
maya.cmds.file( referencerScene, force = True, open = True )
fnOH = IECoreMaya.FnOpHolder( "ns1:ieOpHolderNode1" )
fnOH.setParameterisedValues()
self.assertEqual(
fnOH.getOp()["spline"].getValue().value,
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
(
( 0, 1 ),
( 0, 1 ),
( 1, 0 ),
( 1, 0 ),
),
)
)
def tearDown( self ) :
paths = [
os.getcwd() + "/test/IECoreMaya/splineParameterHandlerTest.ma",
os.path.join( os.getcwd(), "test", "IECoreMaya", "opHolderReference.ma" ),
os.path.join( os.getcwd(), "test", "IECoreMaya", "opHolderReferencer.ma" ),
]
for path in paths :
if os.path.exists( path ) :
os.remove( path )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
|
AlanZatarain/cortex-vfx
|
test/IECoreMaya/SplineParameterHandlerTest.py
|
Python
|
bsd-3-clause
| 12,548
|
import sys
import json
import csv
if __name__ == "__main__":
try:
if len(sys.argv) < 2:
print "Usage: %s <JSON settings file>" % sys.argv[0]
print " <settings file>: access settings (IP/user/password)"
sys.exit(0)
f = open(sys.argv[1], 'r')
settings_file = json.load(f)
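        # Expected settings file shape (illustrative values):
        #   {"ip": "192.0.2.10", "user": "admin", "pw": "secret"}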
from ucscsdk.ucschandle import UcscHandle
handle = UcscHandle(ip=settings_file['ip'], username=settings_file['user'], password=settings_file['pw'])
handle.login()
print "Deleting DomainGroups:" #% (template)
mo = handle.query_dn("domaingroup-root/domaingroup-DG-NA/domaingroup-DG-US/domaingroup-DG-QCJA-Lab")
handle.remove_mo(mo)
handle.commit()
mo = handle.query_dn("domaingroup-root/domaingroup-DG-NA/domaingroup-DG-US")
handle.remove_mo(mo)
handle.commit()
mo = handle.query_dn("domaingroup-root/domaingroup-DG-NA")
handle.remove_mo(mo)
handle.commit()
handle.logout()
except Exception, err:
print "Exception:", str(err)
import traceback, sys
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
|
jocook/s3260-python
|
ucsc_destroy_DomainGroups.py
|
Python
|
apache-2.0
| 1,216
|
import bson
from eduid_userdb.exceptions import UserDoesNotExist
from eduid_userdb.testing import MongoTestCase
from eduid_api_amp import attribute_fetcher
from eduid_am.celery import celery, get_attribute_manager
TEST_DB_NAME = 'eduid_api_test'
class AttributeFetcherTests(MongoTestCase):
def setUp(self, settings={}, skip_on_fail=False, std_user='johnsmith@example.com'):
super(AttributeFetcherTests, self).setUp(celery, get_attribute_manager, userdb_use_old_format=True)
def test_invalid_user(self):
self.assertRaises(UserDoesNotExist, attribute_fetcher, self.tmp_db.conn['test'],
bson.ObjectId('000000000000000000000000'))
def test_existing_user(self):
user_id = self.tmp_db.conn['test'].users.insert({
'email': 'john@example.com',
'givenName': 'John',
})
self.assertEqual(
attribute_fetcher(self.tmp_db.conn['test'], user_id),
{'email': 'john@example.com',
'givenName': 'John',
}
)
def test_malicious_attributes(self):
user_id = self.tmp_db.conn['test'].users.insert({
'email': 'john@example.com',
'givenName': 'John',
'malicious': 'hacker',
})
# Malicious attributes are not returned
self.assertEqual(
attribute_fetcher(self.tmp_db.conn['test'], user_id),
{'email': 'john@example.com',
'givenName': 'John',
}
)
|
SUNET/eduid-api-amp
|
eduid_api_amp/tests.py
|
Python
|
bsd-3-clause
| 1,517
|
#!/usr/bin/env python
""" motion_history_demo.py - Version 1.0 2013-06-26
Based on the OpenCV motempl.py sample code
Extends the ros2opencv2.py script which takes care of user input and image display
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import cv2
import cv2.cv as cv
from ros2opencv2 import ROS2OpenCV2
import numpy as np
from common import nothing, clock, draw_str
MHI_DURATION = 0.1
DEFAULT_THRESHOLD = 12
MAX_TIME_DELTA = 0.05
MIN_TIME_DELTA = 0.02
class MotionHistory(ROS2OpenCV2):
def __init__(self, node_name):
super(MotionHistory, self).__init__(node_name)
self.node_name = node_name
self.visuals = ['input', 'frame_diff', 'motion_hist', 'motion_hist_color', 'grad_orient']
#cv2.namedWindow('motion_templates')
cv2.createTrackbar('visual', node_name, 2, len(self.visuals)-1, nothing)
cv2.createTrackbar('threshold', node_name, DEFAULT_THRESHOLD, 255, nothing)
self.motion_history = None
cv.NamedWindow("Contours", cv.CV_WINDOW_NORMAL)
cv.ResizeWindow("Contours", 640, 480)
def process_image(self, cv_image):
        if self.motion_history is None:
self.h, self.w = cv_image.shape[:2]
self.prev_frame = cv_image.copy()
self.motion_history = np.zeros((self.h, self.w), np.float32)
self.hsv = np.zeros((self.h, self.w, 3), np.uint8)
self.hsv[:,:,1] = 255
self.erode_kernel = cv2.getStructuringElement(cv2.MORPH_ERODE,(3,3))
color_frame = cv_image.copy()
frame_diff = cv2.absdiff(color_frame, self.prev_frame)
gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
thresh = cv2.getTrackbarPos('threshold', self.node_name)
ret, motion_mask = cv2.threshold(gray_diff, thresh, 1, cv2.THRESH_BINARY)
motion_mask = cv2.erode(motion_mask, self.erode_kernel, iterations=2)
motion_mask = cv2.dilate(motion_mask, self.erode_kernel, iterations=2)
timestamp = clock()
cv2.updateMotionHistory(motion_mask, self.motion_history, timestamp, MHI_DURATION)
mg_mask, mg_orient = cv2.calcMotionGradient(self.motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
seg_mask, seg_bounds = cv2.segmentMotion(self.motion_history, timestamp, MAX_TIME_DELTA)
visual_name = self.visuals[cv2.getTrackbarPos('visual', self.node_name)]
if visual_name == 'input':
vis = cv_image.copy()
elif visual_name == 'frame_diff':
vis = frame_diff.copy()
elif visual_name == 'motion_hist':
vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
elif visual_name == 'motion_hist_color':
vis = np.uint8(np.clip((self.motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
elif visual_name == 'grad_orient':
self.hsv[:,:,0] = mg_orient/2
self.hsv[:,:,2] = mg_mask*255
vis = cv2.cvtColor(self.hsv, cv2.COLOR_HSV2BGR)
max_rect_area = 0
for i, rect in enumerate([(0, 0, self.w, self.h)] + list(seg_bounds)):
x, y, rw, rh = rect
area = rw*rh
if area < 64**2:
continue
if area < 640*480 and area > max_rect_area:
max_rect_area = area
max_rect = rect
silh_roi = motion_mask [y:y+rh,x:x+rw]
orient_roi = mg_orient [y:y+rh,x:x+rw]
mask_roi = mg_mask [y:y+rh,x:x+rw]
mhi_roi = self.motion_history[y:y+rh,x:x+rw]
if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
continue
angle = cv2.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
color = ((255, 0, 0), (0, 0, 255))[i == 0]
#draw_motion_comp(vis, rect, angle, color)
#draw_str(vis, (20, 20), visual_name)
display_image = cv_image.copy()
if max_rect_area != 0:
x, y, w, h = max_rect
display = color_frame[y:y+h,x:x+w]
# #bounding_box = cv2.boundingRect(vis)
# #print bounding_box
#
if visual_name == 'motion_hist':
display = vis.copy()
else:
display = cv2.bitwise_and(color_frame, vis, vis)
draw_str(vis, (20, 20), visual_name)
contour_image = vis.copy()
contours, hierarchy = cv2.findContours(contour_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour_points = list()
if len(contours) != 0:
for cnt in contours:
contour_points.append(cnt)
vstack_points = np.vstack(contour_points)
if len(vstack_points) > 5:
z_ellipse = cv2.fitEllipse(vstack_points)
cv2.ellipse(display_image, z_ellipse, (0,255,0), 2)
cv2.drawContours(display_image, contours, -1, (0,255,0), 3)
cv2.imshow("Contours", display_image)
self.prev_frame = color_frame
#cv2.waitKey(5)
return cv_image
def draw_motion_comp(vis, (x, y, w, h), angle, color):
cv2.rectangle(vis, (x, y), (x+w, y+h), (0, 255, 0))
r = min(w/2, h/2)
cx, cy = x+w/2, y+h/2
angle = angle*np.pi/180
cv2.circle(vis, (cx, cy), r, color, 3)
cv2.line(vis, (cx, cy), (int(cx+np.cos(angle)*r), int(cy+np.sin(angle)*r)), color, 3)
def trunc(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
slen = len('%.*f' % (n, f))
return float(str(f)[:slen])
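# e.g. trunc(3.14159, 2) -> 3.14; unlike round(), trunc(2.999, 2) -> 2.99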
if __name__ == '__main__':
try:
node_name = "motion_history"
MotionHistory(node_name)
rospy.spin()
except KeyboardInterrupt:
print "Shutting down motion history node."
cv2.destroyAllWindows()
|
fujy/ROS-Project
|
src/rbx2/rbx2_vision/nodes/unused/motion_history_demo.py
|
Python
|
mit
| 6,836
|
import datetime
from pprint import pprint
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from fo2.connections import db_cursor_so
import logistica.models as models
from utils.functions.models import rows_to_dict_list
from utils.functions.queries import debug_cursor_execute
class Command(BaseCommand):
    help = 'Copy CODIGO_FAMILIA from stage 21 to stage 33, when the lot has advanced.'
def handle(self, *args, **options):
try:
cursor = db_cursor_so()
            # get the lots whose stage-33 family needs to be updated
sql_get = '''
SELECT
l21.CODIGO_FAMILIA
, l21.PERIODO_PRODUCAO
, l21.ORDEM_CONFECCAO
FROM PCPC_040 l21
JOIN pcpc_040 l33
ON l33.PERIODO_PRODUCAO = l21.PERIODO_PRODUCAO
AND l33.ORDEM_CONFECCAO = l21.ORDEM_CONFECCAO
JOIN pcpc_020 o
ON o.ORDEM_PRODUCAO = l21.ORDEM_PRODUCAO
WHERE o.ALTERNATIVA_PECA = 1
AND l21.PERIODO_PRODUCAO = o.PERIODO_PRODUCAO
AND l21.CODIGO_ESTAGIO = 21
AND l21.CODIGO_FAMILIA <> 0
AND l33.CODIGO_ESTAGIO = 33
AND l33.CODIGO_FAMILIA = 0
AND ( l33.QTDE_PECAS_PROD <> 0
OR l33.QTDE_CONSERTO <> 0
OR l33.QTDE_PECAS_2A <> 0
OR l33.QTDE_PERDAS <> 0
)
'''
debug_cursor_execute(cursor, sql_get)
lotes_ori = rows_to_dict_list(cursor)
# self.stdout.write('len(lotes_ori) = {}'.format(len(lotes_ori)))
# set CODIGO_FAMILIA
sql_set = '''
UPDATE PCPC_040 l
SET
l.CODIGO_FAMILIA = %s
WHERE l.PERIODO_PRODUCAO = %s
AND l.ORDEM_CONFECCAO = %s
AND l.CODIGO_ESTAGIO = 33
'''
for ori in lotes_ori:
# self.stdout.write(str(
# [ori['CODIGO_FAMILIA'], ori['PERIODO_PRODUCAO'],
# ori['ORDEM_CONFECCAO']]))
debug_cursor_execute(
cursor,
sql_set,
[ori['CODIGO_FAMILIA'], ori['PERIODO_PRODUCAO'],
ori['ORDEM_CONFECCAO']])
debug_cursor_execute(cursor, sql_get)
lotes_new = rows_to_dict_list(cursor)
date = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
for ori in lotes_ori:
if ori not in lotes_new:
if date:
self.stdout.write(date)
date = None
self.stdout.write(str(ori))
except Exception as e:
            raise CommandError('Error copying CODIGO_FAMILIA: {}'.format(e))
|
anselmobd/fo2
|
src/lotes/management/commands/celula_21_para_33.py
|
Python
|
mit
| 2,964
|
from __future__ import print_function
import logging
import random
import re
from streamlink.compat import urljoin
from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.plugin.api import http
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.plugin.api.utils import itertags
from streamlink.stream import HLSStream
from streamlink.stream import HTTPStream
from streamlink.stream.ffmpegmux import MuxedStream
log = logging.getLogger(__name__)
class Experience(object):
CSRF_NAME = "csrfmiddlewaretoken"
login_url = "https://www.funimation.com/log-in/"
api_base = "https://www.funimation.com/api"
login_api_url = "https://prod-api-funimationnow.dadcdigital.com/api/auth/login/"
show_api_url = api_base + "/experience/{experience_id}/"
sources_api_url = api_base + "/showexperience/{experience_id}/"
languages = ["english", "japanese"]
alphas = ["uncut", "simulcast"]
login_schema = validate.Schema(validate.any(
{"success": False,
"error": validate.text},
{"token": validate.text,
"user": {"id": int}}
))
def __init__(self, experience_id):
"""
:param experience_id: starting experience_id, may be changed later
"""
self.experience_id = experience_id
self._language = None
self.cache = {}
self.token = None
def request(self, method, url, *args, **kwargs):
headers = kwargs.pop("headers", {})
if self.token:
headers.update({"Authorization": "Token {0}".format(self.token)})
http.cookies.update({"src_token": self.token})
log.debug("Making {0}request to {1}".format("authorized " if self.token else "", url))
return http.request(method, url, *args, headers=headers, **kwargs)
def get(self, *args, **kwargs):
return self.request("GET", *args, **kwargs)
def post(self, *args, **kwargs):
return self.request("POST", *args, **kwargs)
@property
def pinst_id(self):
return ''.join([
random.choice("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") for _ in range(8)
])
def _update(self):
api_url = self.show_api_url.format(experience_id=self.experience_id)
log.debug("Requesting experience data: {0}".format(api_url))
res = self.get(api_url)
data = http.json(res)
self.cache[self.experience_id] = data
@property
def show_info(self):
if self.experience_id not in self.cache:
self._update()
return self.cache.get(self.experience_id)
@property
def episode_info(self):
"""
Search for the episode with the requested experience Id
:return:
"""
for season in self.show_info["seasons"]:
for episode in season["episodes"]:
for lang in episode["languages"].values():
for alpha in lang["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return episode
@property
def language(self):
for language, lang_data in self.episode_info["languages"].items():
for alpha in lang_data["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return language
@property
def language_code(self):
return {"english": "eng", "japanese": "jpn"}[self.language]
def set_language(self, language):
if language in self.episode_info["languages"]:
for alpha in self.episode_info["languages"][language]["alpha"].values():
self.experience_id = alpha["experienceId"]
def _get_alpha(self):
for lang_data in self.episode_info["languages"].values():
for alpha in lang_data["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return alpha
def subtitles(self):
alpha = self._get_alpha()
for src in alpha["sources"]:
return src["textTracks"]
def sources(self):
"""
Get the sources for a given experience_id, which is tied to a specific language
:param experience_id: int; video content id
:return: sources dict
"""
api_url = self.sources_api_url.format(experience_id=self.experience_id)
res = self.get(api_url, params={"pinst_id": self.pinst_id})
return http.json(res)
def login_csrf(self):
r = http.get(self.login_url)
for input in itertags(r.text, "input"):
if input.attributes.get("name") == self.CSRF_NAME:
return input.attributes.get("value")
def login(self, email, password):
log.debug("Attempting to login as {0}".format(email))
r = self.post(self.login_api_url,
data={'username': email, 'password': password, self.CSRF_NAME: self.login_csrf()},
raise_for_status=False,
headers={"Referer": "https://www.funimation.com/log-in/"})
d = http.json(r, schema=self.login_schema)
self.token = d.get("token", None)
return self.token is not None
class FunimationNow(Plugin):
arguments = PluginArguments(
PluginArgument(
"email",
argument_name="funimation-email",
requires=["password"],
help="Email address for your Funimation account."
),
PluginArgument(
"password",
argument_name="funimation-password",
sensitive=True,
help="Password for your Funimation account."
),
PluginArgument(
"language",
argument_name="funimation-language",
choices=["en", "ja", "english", "japanese"],
default="english",
help="""
The audio language to use for the stream; japanese or english.
Default is "english".
"""
),
PluginArgument(
"mux-subtitles",
argument_name="funimation-mux-subtitles",
action="store_true",
help="""
Enable automatically including available subtitles in to the output stream.
"""
)
)
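    # Illustrative CLI usage (streamlink exposes each PluginArgument above as
    # a --funimation-* flag; the URL and credentials here are hypothetical):
    #   streamlink --funimation-email you@example.com \
    #              --funimation-password secret \
    #              --funimation-language japanese \
    #              https://www.funimation.com/player/12345 best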
url_re = re.compile(r"""
https?://(?:www\.)funimation(.com|now.uk)
""", re.VERBOSE)
experience_id_re = re.compile(r"/player/(\d+)")
mp4_quality = "480p"
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
http.headers = {"User-Agent": useragents.CHROME}
res = http.get(self.url)
# remap en to english, and ja to japanese
rlanguage = {"en": "english", "ja": "japanese"}.get(self.get_option("language").lower(),
self.get_option("language").lower())
if "_Incapsula_Resource" in res.text:
self.bypass_incapsula(res)
res = http.get(self.url)
id_m = self.experience_id_re.search(res.text)
experience_id = id_m and int(id_m.group(1))
if experience_id:
log.debug("Found experience ID: {0}", experience_id)
exp = Experience(experience_id)
if self.get_option("email") and self.get_option("password"):
if exp.login(self.get_option("email"), self.get_option("password")):
log.info("Logged in to Funimation as {0}", self.get_option("email"))
else:
log.warning("Failed to login")
log.debug("Found episode: {0}", exp.episode_info["episodeTitle"])
log.debug(" has languages: {0}", ", ".join(exp.episode_info["languages"].keys()))
log.debug(" requested language: {0}", rlanguage)
log.debug(" current language: {0}", exp.language)
if rlanguage != exp.language:
log.debug("switching language to: {0}", rlanguage)
exp.set_language(rlanguage)
if exp.language != rlanguage:
log.warning("Requested language {0} is not available, continuing with {1}",
rlanguage, exp.language)
else:
log.debug("New experience ID: {0}", exp.experience_id)
subtitles = None
stream_metadata = {}
disposition = {}
for subtitle in exp.subtitles():
log.debug("Subtitles: {0}", subtitle["src"])
if subtitle["src"].endswith(".vtt") or subtitle["src"].endswith(".srt"):
sub_lang = {"en": "eng", "ja": "jpn"}[subtitle["language"]]
# pick the first suitable subtitle stream
subtitles = subtitles or HTTPStream(self.session, subtitle["src"])
stream_metadata["s:s:0"] = ["language={0}".format(sub_lang)]
stream_metadata["s:a:0"] = ["language={0}".format(exp.language_code)]
sources = exp.sources()
if 'errors' in sources:
for error in sources['errors']:
log.error("{0} : {1}".format(error['title'], error['detail']))
return
for item in sources["items"]:
url = item["src"]
if ".m3u8" in url:
for q, s in HLSStream.parse_variant_playlist(self.session, url).items():
if self.get_option("mux_subtitles") and subtitles:
yield q, MuxedStream(self.session, s, subtitles, metadata=stream_metadata,
disposition=disposition)
else:
yield q, s
elif ".mp4" in url:
# TODO: fix quality
s = HTTPStream(self.session, url)
if self.get_option("mux_subtitles") and subtitles:
yield self.mp4_quality, MuxedStream(self.session, s, subtitles, metadata=stream_metadata,
disposition=disposition)
else:
yield self.mp4_quality, s
else:
log.error("Could not find experience ID?!")
def bypass_incapsula(self, res):
log.info("Attempting to by-pass Incapsula...")
self.clear_cookies(lambda c: "incap" in c.name)
for m in re.finditer(r'''"([A-Z0-9]+)"''', res.text):
d = m.group(1)
# decode the encoded blob to text
js = "".join(map(lambda i: chr(int(i, 16)), [d[x:x + 2] for x in range(0, len(d), 2)]))
jsm = re.search(r'''"GET","([^"]+)''', js)
url = jsm and jsm.group(1)
if url:
log.debug("Found Incapsula auth URL: {0}", url)
res = http.get(urljoin(self.url, url))
success = res.status_code == 200
if success:
self.save_cookies(lambda c: "incap" in c.name)
return success
__plugin__ = FunimationNow
|
javiercantero/streamlink
|
src/streamlink/plugins/funimationnow.py
|
Python
|
bsd-2-clause
| 11,232
|
"""
Tests for resize functionality.
"""
from itertools import permutations
from helper import unittest, PillowTestCase, hopper
from PIL import Image
class TestImagingCoreResize(PillowTestCase):
def resize(self, im, size, f):
# Image class independent version of resize.
im.load()
return im._new(im.im.resize(size, f))
def test_nearest_mode(self):
for mode in ["1", "P", "L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr",
"I;16"]: # exotic mode
im = hopper(mode)
r = self.resize(im, (15, 12), Image.NEAREST)
self.assertEqual(r.mode, mode)
self.assertEqual(r.size, (15, 12) )
self.assertEqual(r.im.bands, im.im.bands)
def test_convolution_modes(self):
self.assertRaises(ValueError, self.resize, hopper("1"),
(15, 12), Image.BILINEAR)
self.assertRaises(ValueError, self.resize, hopper("P"),
(15, 12), Image.BILINEAR)
self.assertRaises(ValueError, self.resize, hopper("I;16"),
(15, 12), Image.BILINEAR)
for mode in ["L", "I", "F", "RGB", "RGBA", "CMYK", "YCbCr"]:
im = hopper(mode)
r = self.resize(im, (15, 12), Image.BILINEAR)
self.assertEqual(r.mode, mode)
self.assertEqual(r.size, (15, 12) )
self.assertEqual(r.im.bands, im.im.bands)
def test_reduce_filters(self):
for f in [Image.LINEAR, Image.BILINEAR, Image.BICUBIC, Image.LANCZOS]:
r = self.resize(hopper("RGB"), (15, 12), f)
self.assertEqual(r.mode, "RGB")
self.assertEqual(r.size, (15, 12))
def test_enlarge_filters(self):
for f in [Image.LINEAR, Image.BILINEAR, Image.BICUBIC, Image.LANCZOS]:
r = self.resize(hopper("RGB"), (212, 195), f)
self.assertEqual(r.mode, "RGB")
self.assertEqual(r.size, (212, 195))
def test_endianness(self):
# Make an image with one colored pixel, in one channel.
# When resized, that channel should be the same as a GS image.
# Other channels should be unaffected.
        # The R and A channels should not swap; a swap would indicate
        # an endianness issue.
samples = {
'blank': Image.new('L', (2, 2), 0),
'filled': Image.new('L', (2, 2), 255),
'dirty': Image.new('L', (2, 2), 0),
}
samples['dirty'].putpixel((1, 1), 128)
for f in [Image.LINEAR, Image.BILINEAR, Image.BICUBIC, Image.LANCZOS]:
# samples resized with current filter
references = dict(
(name, self.resize(ch, (4, 4), f))
for name, ch in samples.items()
)
for mode, channels_set in [
('RGB', ('blank', 'filled', 'dirty')),
('RGBA', ('blank', 'blank', 'filled', 'dirty')),
('LA', ('filled', 'dirty')),
]:
for channels in set(permutations(channels_set)):
# compile image from different channels permutations
im = Image.merge(mode, [samples[ch] for ch in channels])
resized = self.resize(im, (4, 4), f)
for i, ch in enumerate(resized.split()):
# check what resized channel in image is the same
# as separately resized channel
self.assert_image_equal(ch, references[channels[i]])
class TestImageResize(PillowTestCase):
def test_resize(self):
def resize(mode, size):
out = hopper(mode).resize(size)
self.assertEqual(out.mode, mode)
self.assertEqual(out.size, size)
for mode in "1", "P", "L", "RGB", "I", "F":
resize(mode, (112, 103))
resize(mode, (188, 214))
if __name__ == '__main__':
unittest.main()
# End of file
|
1upon0/rfid-auth-system
|
GUI/printer/Pillow-2.7.0/Tests/test_image_resize.py
|
Python
|
apache-2.0
| 3,963
|
import bpy
from bpy.utils import register_class, unregister_class
import nodeitems_utils
from .random_property import RandomPropertyNode
from .ramp_property import RampPropertyNode
classes = [RandomPropertyNode, RampPropertyNode]
def register():
for cls in classes:
register_class(cls)
def unregister():
for cls in classes:
unregister_class(cls)
|
MaximeHerpin/modular_tree
|
python_classes/nodes/properties/__init__.py
|
Python
|
gpl-3.0
| 374
|
# -*- encoding: utf-8 -*-
#
# This file is part of jottafs.
#
# jottafs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jottafs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jottafs. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2011,2013,2014 Håvard Gulldahl <havard@gulldahl.no>
# metadata
__author__ = 'havard@gulldahl.no'
from jottalib import __version__
# importing stdlib
import sys, os, os.path, time
import posixpath, logging, datetime, hashlib
from collections import namedtuple
import six
from six.moves import cStringIO as StringIO
# importing external dependencies (pip these, please!)
import requests
from requests.utils import quote
import netrc
import requests_toolbelt
import certifi
import lxml, lxml.objectify
import dateutil, dateutil.parser # pip install python-dateutil
log = logging.getLogger(__name__)
#monkeypatch urllib3 param function to bypass bug in jottacloud servers
from requests.packages import urllib3
urllib3.fields.format_header_param_orig = urllib3.fields.format_header_param
def mp(name, value):
return urllib3.fields.format_header_param_orig(name, value).replace('filename*=', 'filename=')
urllib3.fields.format_header_param = mp
# some setup
JFS_ROOT='https://www.jottacloud.com/jfs/'
# helper functions
try:
unicode("we are python2")
except NameError:
def unicode(s): return str(s) # TODO: use six
def get_auth_info():
""" Get authentication details to jottacloud.
Will first check environment variables, then the .netrc file.
"""
env_username = os.environ.get('JOTTACLOUD_USERNAME')
env_password = os.environ.get('JOTTACLOUD_PASSWORD')
netrc_auth = None
try:
netrc_file = netrc.netrc()
netrc_auth = netrc_file.authenticators('jottacloud.com')
except IOError:
# .netrc file doesn't exist
pass
netrc_username = None
netrc_password = None
if netrc_auth:
netrc_username, _, netrc_password = netrc_auth
username = env_username or netrc_username
password = env_password or netrc_password
if not (username and password):
raise JFSError('Could not find username and password in either env or ~/.netrc, '
'you need to add one of these to use these tools')
return (username, password)
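# Minimal usage sketch (assumes credentials in the environment or a
# 'machine jottacloud.com' entry in ~/.netrc):
#   username, password = get_auth_info()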
def calculate_md5(fileobject, size=2**16):
"""Utility function to calculate md5 hashes while being light on memory usage.
By reading the fileobject piece by piece, we are able to process content that
is larger than available memory"""
fileobject.seek(0)
md5 = hashlib.md5()
for data in iter(lambda: fileobject.read(size), b''):
md5.update(data)
fileobject.seek(0) # rewind read head
return md5.hexdigest()
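# Illustrative use on a large on-disk file (path hypothetical):
#   with open('big.bin', 'rb') as f:
#       digest = calculate_md5(f)  # hex string; file is read in 64 KiB chunks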
# error classes
class JFSError(Exception):
@staticmethod
    def raiseError(e, path): # e is an error object parsed via lxml.objectify
        if e.code == 404:
            raise JFSNotFoundError('%s does not exist (%s)' % (path, e.message))
        elif e.code == 401:
            raise JFSCredentialsError("Your credentials don't match for %s (%s) (probably incorrect password!)" % (path, e.message))
        elif e.code == 403:
            raise JFSAuthenticationError("You don't have access to %s (%s)" % (path, e.message))
        elif e.code == 416:
            raise JFSRangeError("Requested Range Not Satisfiable (%s)" % e.message)
        elif e.code == 500:
            raise JFSServerError("Internal server error: %s (%s)" % (path, e.message))
        elif e.code == 400:
            raise JFSBadRequestError('Bad request: %s (%s)' % (path, e.message))
        else:
            raise JFSError('Error accessing %s (%s)' % (path, e.message))
class JFSBadRequestError(JFSError): # HTTP 400
pass
class JFSCredentialsError(JFSError): # HTTP 401
pass
class JFSNotFoundError(JFSError): # HTTP 404
pass
class JFSAccessError(JFSError):
pass
class JFSAuthenticationError(JFSError): # HTTP 403
pass
class JFSRangeError(JFSError): # HTTP 416
pass
class JFSServerError(JFSError): # HTTP 500
pass
# classes mapping JFS structures
class JFSFileDirList(object):
'Wrapping <filedirlist>, a simple tree of folders and their files'
"""get a <filedirlist> for any jottafolder by appending ?mode=list to your query
<filedirlist time="2015-05-28-T18:57:06Z" host="dn-093.site-000.jotta.no">
<folders>
<folder name="Sync">
<path xml:space="preserve">/havardgulldahl/Jotta</path>
<abspath xml:space="preserve">/havardgulldahl/Jotta</abspath>
<files>
<file>..."""
def __init__(self, filedirlistobject, jfs, parentpath): # filedirlistobject from lxml.objectify
self.filedirlist = filedirlistobject
self.parentPath = parentpath
self.jfs = jfs
treefile = namedtuple('TreeFile', 'name size md5 uuid')
self.tree = {}
for folder in self.filedirlist.folders.iterchildren():
foldername = unicode(folder.attrib.get('name'))
path = unicode(folder.path)
t = []
if hasattr(folder, 'files'):
for file_ in folder.files.iterchildren():
if hasattr(file_, 'currentRevision'): # a normal file
t.append(treefile(unicode(file_.attrib['name']),
int(file_.currentRevision.size),
unicode(file_.currentRevision.md5),
unicode(file_.attrib['uuid'])
)
)
else:
# an incomplete file
t.append(treefile(unicode(file_.attrib['name']),
-1, # incomplete files have no size
unicode(file_.latestRevision.md5),
unicode(file_.attrib['uuid'])
)
)
self.tree[posixpath.join(path, foldername)] = t
class JFSFolder(object):
'OO interface to a folder, for convenient access. Type less, do more.'
def __init__(self, folderobject, jfs, parentpath): # folderobject from lxml.objectify
self.folder = folderobject
self.parentPath = parentpath
self.jfs = jfs
self.synced = False
@property
def name(self):
return unicode(self.folder.attrib['name']) if self.folder.attrib.has_key('name') else unicode(self.folder.name)
@property
def path(self):
return '%s/%s' % (self.parentPath, self.name)
@property
def deleted(self):
        "Return datetime.datetime or None if the folder isn't deleted"
_d = self.folder.attrib.get('deleted', None)
if _d is None: return None
return dateutil.parser.parse(str(_d))
def sync(self):
'Update state of folder from Jottacloud server'
log.info("syncing %s" % self.path)
self.folder = self.jfs.get(self.path)
self.synced = True
def is_deleted(self):
'Return bool based on self.deleted'
return self.deleted is not None
def files(self):
if not self.synced:
self.sync()
try:
#return [JFSFile(f, self.jfs, self.path) for f in self.folder.files.iterchildren()]
for _f in self.folder.files.iterchildren():
if hasattr(_f, 'currentRevision'): # a normal file
yield JFSFile(_f, self.jfs, self.path)
else:
yield JFSIncompleteFile(_f, self.jfs, self.path)
except AttributeError:
while False:
yield None
#return [x for x in []]
def folders(self):
if not self.synced:
self.sync()
try:
return [JFSFolder(f, self.jfs, self.path) for f in self.folder.folders.iterchildren()]
except AttributeError:
return [x for x in []]
def mkdir(self, foldername):
'Create a new subfolder and return the new JFSFolder'
url = '%s?mkDir=true' % posixpath.join(self.path, foldername)
r = self.jfs.post(url)
self.sync()
return r
def restore(self):
'Restore the folder'
if not self.deleted:
raise JFSError('Tried to restore a not deleted folder')
url = 'https://www.jottacloud.com/rest/webrest/%s/action/restore' % self.jfs.username
data = {'paths[]': self.path.replace(JFS_ROOT, ''),
'web': 'true',
'ts': int(time.time()),
'authToken': 0}
r = self.jfs.post(url, content=data)
return r
def delete(self):
'Delete this folder and return a deleted JFSFolder'
url = '%s?dlDir=true' % self.path
r = self.jfs.post(url)
self.sync()
return r
def hard_delete(self):
'Deletes without possibility to restore'
url = 'https://www.jottacloud.com/rest/webrest/%s/action/delete' % self.jfs.username
data = {'paths[]': self.path.replace(JFS_ROOT, ''),
'web': 'true',
'ts': int(time.time()),
'authToken': 0}
r = self.jfs.post(url, content=data)
return r
def rename(self, newpath):
"Move folder to a new name, possibly a whole new path"
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
r = self.jfs.post(url, extra_headers={'Content-Type':'application/octet-stream'})
return r
def up(self, fileobj_or_path, filename=None, upload_callback=None):
'Upload a file to current folder and return the new JFSFile'
if not ( hasattr(fileobj_or_path, 'read') and hasattr(fileobj_or_path, 'name') ):
filename = os.path.basename(fileobj_or_path)
fileobj_or_path = open(fileobj_or_path, 'rb')
elif filename is None: # fileobj is file, but filename is None
filename = os.path.basename(fileobj_or_path.name)
log.debug('.up %s -> %s %s', repr(fileobj_or_path), repr(self.path), repr(filename))
r = self.jfs.up(posixpath.join(self.path, filename), fileobj_or_path,
upload_callback=upload_callback)
self.sync()
return r
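    # Illustrative call patterns for up() (paths hypothetical):
    #   folder.up('/tmp/report.pdf')                         # path; basename kept
    #   folder.up(open('/tmp/report.pdf', 'rb'), 'copy.pdf') # fileobj + new name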
def filedirlist(self):
'Get a JFSFileDirList, recursive tree of JFSFile and JFSFolder'
url = '%s?mode=list' % self.path
return self.jfs.getObject(url)
class ProtoFile(object):
    'Prototype for the different incarnations of a file, e.g. JFSIncompleteFile and JFSFile'
# constants for known file states
STATE_COMPLETED = 'COMPLETED' # -> JFSFile
STATE_ADDED = 'ADDED'
STATE_INCOMPLETE = 'INCOMPLETE' # -> JFSIncompleteFile
STATE_PROCESSING = 'PROCESSING'
STATE_CORRUPT = 'CORRUPT'
def __init__(self, fileobject, jfs, parentpath): # fileobject from lxml.objectify
self.f = fileobject
self.jfs = jfs
self.parentPath = parentpath
def is_image(self):
'Return bool based on self.mime'
return os.path.dirname(self.mime) == 'image'
@property
def name(self):
return unicode(self.f.attrib['name'])
@property
def uuid(self):
return unicode(self.f.attrib['uuid'])
@property
def deleted(self):
        "Return datetime.datetime or None if the file isn't deleted"
_d = self.f.attrib.get('deleted', None)
if _d is None: return None
return dateutil.parser.parse(str(_d))
def is_deleted(self):
'Return bool based on self.deleted'
return self.deleted is not None
@property
def path(self):
return '%s/%s' % (self.parentPath, self.name)
class JFSIncompleteFile(ProtoFile):
'OO interface to an incomplete file.'
"""<file name="iii.m4v" uuid="e8f268ac-d081-4d4f-bfb1-77149b2bd51d" time="2015-05-29-T18:11:56Z" host="dn-091.site-000.jotta.no">
<path xml:space="preserve">/havardgulldahl/Jotta/Sync</path>
<abspath xml:space="preserve">/havardgulldahl/Jotta/Sync</abspath>
<latestRevision>
<number>1</number>
<state>INCOMPLETE</state>
<created>2014-05-22-T22:13:52Z</created>
<modified>2014-05-19-T13:37:14Z</modified>
<mime>video/mp4</mime>
<mstyle>VIDEO_MP4</mstyle>
<size>100483008</size> <!-- THIS IS THE SIZE OF WHAT'S BEEN TRANSFERED THIS FAR. havardgulldahl -->
<md5>4d7cdab5256b72d17075ec388e467e99</md5>
<updated>2015-05-29-T18:07:56Z</updated>
</latestRevision>
    </file>"""
def resume(self, data):
'Resume uploading an incomplete file, after a previous upload was interrupted. Returns new file object'
if not hasattr(data, 'read'):
data = StringIO(data)
#check if what we're asked to upload is actually the right file
md5 = calculate_md5(data)
if md5 != self.md5:
raise JFSError('''MD5 hashes don't match! Are you trying to resume with the wrong file?''')
log.debug('Resuming %s from offset %s', self.path, self.size)
return self.jfs.up(self.path, data, resume_offset=self.size)
@property
def revisionNumber(self):
'return int of current revision'
return int(self.f.latestRevision.number)
@property
def created(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.latestRevision.created))
@property
def modified(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.latestRevision.modified))
@property
def updated(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.latestRevision.updated))
@property
def md5(self):
return str(self.f.latestRevision.md5)
@property
def mime(self):
return unicode(self.f.latestRevision.mime)
@property
def state(self):
return unicode(self.f.latestRevision.state)
@property
def size(self):
"""Bytes uploaded of the file so far.
Note that we only have the file size if the file was requested directly,
not if it's part of a folder listing.
"""
if hasattr(self.f.latestRevision, 'size'):
return int(self.f.latestRevision.size)
return None
class JFSFile(JFSIncompleteFile):
'OO interface to a file, for convenient access. Type less, do more.'
## TODO: add <revisions> iterator for all
"""
<file name="jottacloud.sync.pdfname" uuid="37530f11-d55b-4f31-acf4-27854813cd34" time="2013-12-15-T01:11:52Z" host="dn-029.site-000.jotta.no">
<path xml:space="preserve">/havardgulldahl/Jotta/Sync</path>
<abspath xml:space="preserve">/havardgulldahl/Jotta/Sync</abspath>
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2013-07-19-T22:59:16Z</created>
<modified>2013-07-19-T22:59:17Z</modified>
<mime>application/octet-stream</mime>
<mstyle>APPLICATION_OCTET_STREAM</mstyle>
<size>218028</size>
<md5>e8f05ca4ebd70bc93ce2f18e26cee2a3</md5>
<updated>2013-07-19-T22:59:31Z</updated>
</currentRevision>
</file>
"""
# Constants for thumb nail sizes
BIGTHUMB='WL'
MEDIUMTHUMB='WM'
SMALLTHUMB='WS'
XLTHUMB='WXL'
def __init__(self, fileobject, jfs, parentpath): # fileobject from lxml.objectify
self.f = fileobject
self.jfs = jfs
self.parentPath = parentpath
def stream(self, chunk_size=64*1024):
'Returns a generator to iterate over the file contents'
return self.jfs.stream(url='%s?mode=bin' % self.path, chunk_size=chunk_size)
def read(self):
'Get the file contents as string'
return self.jfs.raw('%s?mode=bin' % self.path)
"""
* name = 'jottacloud.sync.pdfname'
* uuid = '37530f11-d55b-4f31-acf4-27854813cd34'
currentRevision = None [ObjectifiedElement]
number = 1 [IntElement]
state = 'COMPLETED' [StringElement]
created = '2010-11-19-T12:34:18Z' [StringElement]
modified = '2010-11-19-T12:34:18Z' [StringElement]
mime = 'image/jpeg' [StringElement]
mstyle = 'IMAGE_JPEG' [StringElement]
size = 125848 [IntElement]
md5 = 'a0dc8233169b238681c43f9981efe8e1' [StringElement]
updated = '2010-11-19-T12:34:28Z' [StringElement]
"""
def readpartial(self, start, end):
'Get a part of the file, from start byte to end byte (integers)'
return self.jfs.raw('%s?mode=bin' % self.path,
# note that we deduct 1 from end because
# in http Range requests, the end value is included in the slice,
# whereas in python, it is not
extra_headers={'Range':'bytes=%s-%s' % (start, end-1)})
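        # e.g. readpartial(0, 1024) fetches the first KiB (Range: bytes=0-1023)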
def write(self, data):
'Put, possibly replace, file contents with (new) data'
if not hasattr(data, 'read'):
data = StringIO(data)
self.jfs.up(self.path, data)
def share(self):
'Enable public access at secret, share only uri, and return that uri'
url = 'https://www.jottacloud.com/rest/webrest/%s/action/enableSharing' % self.jfs.username
data = {'paths[]':self.path.replace(JFS_ROOT, ''),
'web':'true',
'ts':int(time.time()),
'authToken':0}
r = self.jfs.post(url, content=data)
return r
def restore(self):
'Restore the file'
if not self.deleted:
raise JFSError('Tried to restore a not deleted file')
url = 'https://www.jottacloud.com/rest/webrest/%s/action/restore' % self.jfs.username
data = {'paths[]': self.path.replace(JFS_ROOT, ''),
'web': 'true',
'ts': int(time.time()),
'authToken': 0}
r = self.jfs.post(url, content=data)
return r
def hard_delete(self):
'Delete the file permanently, with no possibility to restore'
url = 'https://www.jottacloud.com/rest/webrest/%s/action/delete' % self.jfs.username
data = {'paths[]': self.path.replace(JFS_ROOT, ''),
'web': 'true',
'ts': int(time.time()),
'authToken': 0}
r = self.jfs.post(url, content=data)
return r
def delete(self):
'Delete this file and return the new, deleted JFSFile'
url = '%s?dl=true' % self.path
r = self.jfs.post(url)
return r
def rename(self, newpath):
"Move file to a new name, possibly a whole new path"
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/testFolder/testFile.txt?mv=/**USERNAME**/Jotta/Sync/testFolder/renamedTestFile.txt
url = '%s?mv=/%s%s' % (self.path, self.jfs.username, newpath)
r = self.jfs.post(url, extra_headers={'Content-Type':'application/octet-stream'})
return r
def thumb(self, size=BIGTHUMB):
'''Get a thumbnail as string, or None if the file isn't an image.
size should be one of JFSFile.BIGTHUMB, .MEDIUMTHUMB, .SMALLTHUMB or .XLTHUMB'''
if not self.is_image():
return None
if size not in (self.BIGTHUMB, self.MEDIUMTHUMB, self.SMALLTHUMB, self.XLTHUMB):
raise JFSError('Invalid thumbnail size: %s for image %s' % (size, self.path))
return self.jfs.raw('%s?mode=thumb&ts=%s' % (self.path, size))
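# Hedged usage sketch (illustrative, not from the original source),
# assuming `photo` is a JFSFile wrapping an image:
#   data = photo.thumb(size=JFSFile.MEDIUMTHUMB)
#   if data is not None:
#       with open('/tmp/thumb.jpg', 'wb') as out:
#           out.write(data)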
@property
def revisionNumber(self):
'return int of current revision'
return int(self.f.currentRevision.number)
@property
def created(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.currentRevision.created))
@property
def modified(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.currentRevision.modified))
@property
def updated(self):
'return datetime.datetime'
return dateutil.parser.parse(str(self.f.currentRevision.updated))
@property
def size(self):
'return int of size in bytes'
return int(self.f.currentRevision.size)
@property
def md5(self):
return str(self.f.currentRevision.md5)
@property
def mime(self):
return unicode(self.f.currentRevision.mime)
@property
def state(self):
return unicode(self.f.currentRevision.state)
class JFSMountPoint(JFSFolder):
'OO interface to a mountpoint, for convenient access. Type less, do more.'
def __init__(self, mountpointobject, jfs, parentpath): # folderobject from lxml.objectify
super(JFSMountPoint, self).__init__(mountpointobject, jfs, parentpath)
self.folder = mountpointobject # name it 'folder' because of inheritance
def delete(self):
"override inherited method that makes no sense here"
raise JFSError("Can't delete a mountpoint")
def rename(self, newpath):
"override inherited method that makes no sense here"
raise JFSError("Can't rename a mountpoint")
@property
def name(self):
return unicode(self.folder.name)
@property
def size(self):
'Return int of size in bytes'
return int(self.folder.size)
@property
def modified(self):
'Return datetime.datetime'
return dateutil.parser.parse(str(self.folder.modified))
class JFSDevice(object):
'''OO interface to a device, for convenient access. Type less, do more.
Note that sometimes we cheat a little and instantiate this object with only the elements
available from <user>, in which case some elements aren't there.'''
""" raw xml example:
<device time="2014-02-20-T21:02:42Z" host="dn-036.site-000.jotta.no">
<name xml:space="preserve">laptop</name>
<type>LAPTOP</type>
<sid>d831efc4-f885-4d97-bd8d-</sid>
<size>371951820971</size>
<modified>2014-02-20-T14:03:52Z</modified>
<!-- the following elements are only available if we get the metadata from
the http path explicitly.
you won't find it here under the <user/> element -->
<user>hgl</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">backup</name>
<size>372544055053</size>
<modified>2014-02-20-T14:03:52Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Desktop</name>
<size>581758</size>
<modified>2010-11-12-T20:44:15Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Documents</name>
<size>417689097</size>
<modified>2010-12-19-T22:40:16Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Downloads</name>
<size>0</size>
<modified>2010-11-14-T21:00:07Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Pictures</name>
<size>5150529</size>
<modified>2010-11-29-T11:13:03Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Videos</name>
<size>13679997</size>
<modified>2010-11-29-T11:13:46Z</modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="6" num_mountpoints="6"/>
</device>
"""
def __init__(self, deviceobject, jfs, parentpath): # deviceobject from lxml.objectify
self.dev = deviceobject
self._jfs = jfs
self.parentPath = parentpath
self.mountPoints = {unicode(mp.name):mp for mp in self.mountpointobjects()}
def contents(self, path=None):
"""Get _all_ metadata for this device.
Call this method if you have the lite/abbreviated device info from e.g. <user/>. """
if hasattr(path, 'name'):
# we were passed an element object; use its .name as the path value
log.debug("passed an object, using its .name as path value")
path = '/%s' % path.name
c = self._jfs.get('%s%s' % (self.path, path or '/'))
return c
def mountpointobjects(self):
try:
return [ JFSMountPoint(obj, self._jfs, self.path) for obj in self.contents().mountPoints.iterchildren() ]
except AttributeError:
# there are no mountpoints. this may happen on newly created devices. see github bug#26
return []
def files(self, mountPoint):
"""Get an iterator of JFSFile() from the given mountPoint.
"mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name. """
if isinstance(mountPoint, six.string_types):
# shortcut: pass a mountpoint name
mountPoint = self.mountPoints[mountPoint]
try:
return [JFSFile(f, self, parentpath='%s/%s' % (self.path, mountPoint.name)) for f in self.contents(mountPoint).files.iterchildren()]
except AttributeError:
# there are no files at all
return []
def folders(self, mountPoint):
"""Get an iterator of JFSFolder() from the given mountPoint.
"mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name. """
if isinstance(mountPoint, six.string_types):
# shortcut: pass a mountpoint name
mountPoint = self.mountPoints[mountPoint]
try:
return [JFSFolder(f, self, parentpath='%s/%s' % (self.path, mountPoint.name)) for f in self.contents(mountPoint).folders.iterchildren()]
except AttributeError:
# there are no folders at all
return []
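# Hedged traversal sketch (illustrative): walk every file under every
# mountpoint of a device `dev`; assumes JFSFile exposes a .name property
# like the other wrappers here:
#   for mp in dev.mountPoints.values():
#       for f in dev.files(mp):
#           print(f.name, f.size)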
def new_mountpoint(self, name):
"""Create a new mountpoint"""
url = posixpath.join(self.path, name)
r = self._jfs.post(url, extra_headers={'content-type': 'application/x-www-form-urlencoded'})
return r
@property
def modified(self):
'Return datetime.datetime'
return dateutil.parser.parse(str(self.dev.modified))
@property
def path(self):
return posixpath.join(self.parentPath, self.name)
@property
def name(self):
return unicode(self.dev.name)
@property
def type(self):
return unicode(self.dev.type)
@property
def size(self):
'Return int of size in bytes'
return int(self.dev.size)
@property
def sid(self):
return str(self.dev.sid)
class JFSenableSharing(object):
'Wrap the enableSharing element in a Python class'
"""<enableSharing>
<files>
<file name="V1B.docx" uuid="d4490ff3-505c-4ecd-9994-583a6668d3b9">
<publicURI>33cb006a8ec6493a9dabab48503d022b</publicURI>
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2014-10-08-T17:26:12Z</created>
<modified>2014-10-08-T17:26:12Z</modified>
<mime>application/msword</mime>
<mstyle>APPLICATION_MSWORD</mstyle>
<size>12882</size>
<md5>5074ad00d3d97f9b938c46c78a97e817</md5>
<updated>2014-10-08-T15:27:10Z</updated>
</currentRevision>
</file>
</files>
</enableSharing>"""
def __init__(self, sharing, jfs): # sharing element from lxml.objectify
self.sharing = sharing
self.jfs = jfs
def sharedFiles(self):
'iterate over shared files and get their public URI'
for f in self.sharing.files.iterchildren():
yield (f.attrib['name'], f.attrib['uuid'],
'https://www.jottacloud.com/p/%s/%s' % (self.jfs.username, f.publicURI.text))
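# Hedged usage sketch: JFSFile.share() POSTs to the enableSharing
# endpoint and (via JFS.getObject) comes back wrapped in this class:
#   shared = some_file.share()
#   for name, uuid, public_url in shared.sharedFiles():
#       print(name, public_url)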
class JFS(object):
def __init__(self, auth=None):
from requests.auth import HTTPBasicAuth
self.apiversion = '2.2' # hard-coded as of October 2014
self.session = requests.Session() # create a session for connection pooling, ssl keepalives and cookie jar
self.session.stream = True
if not auth:
auth = get_auth_info()
self.username, password = auth
self.session.auth = HTTPBasicAuth(self.username, password)
self.session.verify = certifi.where()
self.session.headers = {'User-Agent':'jottalib %s (https://github.com/havardgulldahl/jottalib)' % (__version__, ),
'X-JottaAPIVersion': self.apiversion,
}
self.rootpath = JFS_ROOT + self.username
self.fs = self.get(self.rootpath)
def escapeUrl(self, url):
separators = [
'?dl=true',
'?mkDir=true',
'?dlDir=true',
'?mvDir=',
'?mv=',
'?mode=list',
'?mode=bin',
'?mode=thumb&ts='
]
separator = separators[0]
for sep in separators:
if sep in url:
separator = sep
break
urlparts = url.rsplit(separator, 1)
if len(urlparts) == 2:
url = quote(urlparts[0], safe=self.rootpath) + separator + urlparts[1]
else:
url = quote(urlparts[0], safe=self.rootpath)
return url
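# Hedged illustration of escapeUrl() (paths are made up): for a rename of
# a file whose name contains a space,
#   escapeUrl('.../jfs/user/Jotta/Sync/my file.txt?mv=/user/Jotta/Sync/new.txt')
# splits on the '?mv=' separator, percent-encodes only the path part
# ('my file.txt' -> 'my%20file.txt') and leaves the query suffix untouched.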
def request(self, url, extra_headers=None):
'Make a GET request for url'
if not url.startswith('http'):
# relative url
url = self.rootpath + url
log.debug("getting url: %s, extra_headers=%s", url, extra_headers)
if extra_headers is None:
extra_headers = {}
r = self.session.get(url, headers=extra_headers)
if r.status_code in ( 500, ):
raise JFSError(r.reason)
return r
def raw(self, url, extra_headers=None):
'Make a GET request for url and return whatever content we get'
r = self.request(url, extra_headers=extra_headers)
# uncomment to dump raw xml
# with open('/tmp/%s.xml' % time.time(), 'wb') as f:
# f.write(r.content)
if not r.ok:
log.warning('HTTP GET failed: %s', r.text)
o = lxml.objectify.fromstring(r.content)
JFSError.raiseError(o, url)
return r.content
def get(self, url):
'Make a GET request for url and return the response content as a generic lxml object'
url = self.escapeUrl(url)
o = lxml.objectify.fromstring(self.raw(url))
if o.tag == 'error':
JFSError.raiseError(o, url)
return o
def getObject(self, url_or_requests_response):
'Take a url or an xml response from JottaCloud and wrap it in the corresponding JFS* class'
if isinstance(url_or_requests_response, requests.models.Response):
url = url_or_requests_response.url
o = lxml.objectify.fromstring(url_or_requests_response.content)
else:
url = url_or_requests_response
o = self.get(url)
parent = os.path.dirname(url).replace('up.jottacloud.com', 'www.jottacloud.com')
if o.tag == 'error':
JFSError.raiseError(o, url)
elif o.tag == 'device': return JFSDevice(o, jfs=self, parentpath=parent)
elif o.tag == 'folder': return JFSFolder(o, jfs=self, parentpath=parent)
elif o.tag == 'mountPoint': return JFSMountPoint(o, jfs=self, parentpath=parent)
elif o.tag == 'restoredFiles': return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'deleteFiles': return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'file':
try:
if o.latestRevision.state == 'INCOMPLETE':
return JFSIncompleteFile(o, jfs=self, parentpath=parent)
except AttributeError:
# completed files carry a <currentRevision> element rather than
# <latestRevision>, so the lookup fails and we fall through
pass
return JFSFile(o, jfs=self, parentpath=parent)
elif o.tag == 'enableSharing': return JFSenableSharing(o, jfs=self)
elif o.tag == 'user':
self.fs = o
return self.fs
elif o.tag == 'filedirlist': return JFSFileDirList(o, jfs=self, parentpath=parent)
raise JFSError("invalid object: %s <- %s" % (repr(o), url_or_requests_response))
def stream(self, url, chunk_size=64*1024):
'Iterator to get remote content by chunk_size (bytes)'
r = self.request(url)
for chunk in r.iter_content(chunk_size):
yield chunk
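# Hedged usage sketch: save a remote file to disk without holding it all
# in memory (assumes `jfs` is an authenticated JFS instance and the
# remote path exists):
#   with open('/tmp/big.iso', 'wb') as out:
#       for chunk in jfs.stream('/Jotta/Sync/big.iso?mode=bin'):
#           out.write(chunk)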
def post(self, url, content='', files=None, params=None, extra_headers={}, upload_callback=None):
'HTTP Post files[] or content (unicode string) to url'
if not url.startswith('http'):
# relative url
url = self.rootpath + url
log.debug('posting content (len %s) to url %s', len(content) if content is not None else '?', url)
headers = self.session.headers.copy()
headers.update(**extra_headers)
if files is not None:
m = requests_toolbelt.MultipartEncoder(fields=files)
if upload_callback is not None:
m_len = m.len # compute value for callback closure
def callback(monitor):
upload_callback(monitor, m_len)
m = requests_toolbelt.MultipartEncoderMonitor(m, callback)
headers['content-type'] = m.content_type
else:
m = content
url = self.escapeUrl(url)
r = self.session.post(url, data=m, params=params, headers=headers)
if not r.ok:
log.warning('HTTP POST failed: %s', r.text)
raise JFSError(r.reason)
return self.getObject(r) # return a JFS* class
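# Hedged usage sketch for the upload_callback plumbing above: the callback
# receives the MultipartEncoderMonitor and the total payload size, so a
# simple progress printer could look like
#   def progress(monitor, total):
#       print('%d / %d bytes sent' % (monitor.bytes_read, total))
#   jfs.up('/Jotta/Sync/big.bin', open('big.bin', 'rb'), upload_callback=progress)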
def up(self, path, fileobject, upload_callback=None, resume_offset=None):
"Upload a fileobject to path, HTTP POST-ing to up.jottacloud.com, using the JottaCloud API"
"""
*** WHAT DID I DO?: created file
***
POST https://up.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/testFolder/testFile.txt?cphash=d41d8cd98f00b204e9800998ecf8427e HTTP/1.1
User-Agent: Desktop_Jottacloud 3.0.22.203 Windows_8 6.2.9200 x86_64
Authorization: Basic ******************
X-JottaAPIVersion: 2.2
X-Jfs-DeviceName: **CENSORED**
JCreated: 2014-10-26T12:33:09Z+00:00
JModified: 2014-10-26T12:33:09Z+00:00
JMd5: d41d8cd98f00b204e9800998ecf8427e
JSize: 0
jx_csid: dOq1NCRer6uxuR/bFxihasj4QzBU3Tn7S2jVF1CE71YW1fGhxPFYYsw2T0XYjnJBtxKQzhWixmg+u5kp8bJtvMpIFHbhSDmPPSk+PVBf2UdFhXxli4YEII9a97eO4XBfn5QWAV1LJ2Z9l59jmnLkJQgfOyexkuQbxHdSLgQPXu8=
jx_lisence: M1v3p31oQf2OXvyAn2GvfS2I2oiMXrw+cofuMVHHI/2K+wlxhj22VkON6fN6fJMsGNcMzvcFYfmKPgL0Yf8TCO5A/6ULk6N8LctY3+fPegx+Jgbyc4hh0IXwnOdqa+UZ6Lg1ub4VXr5XnX3P3IxeVDg0VbcJnzv4TbFA+oMXmfM=
Content-Type: application/octet-stream
Content-Length: 0
Connection: Keep-Alive
Accept-Encoding: gzip
Accept-Language: nb-NO,en,*
Host: up.jottacloud.com
"""
url = path.replace('www.jottacloud.com', 'up.jottacloud.com')
# Calculate file length
fileobject.seek(0,2)
contentlen = fileobject.tell()
# Rewind read head to correct offset
# If we're resuming a borked upload, continue from that offset
fileobject.seek(resume_offset if resume_offset is not None else 0)
# Calculate file md5 hash
md5hash = calculate_md5(fileobject)
log.debug('posting content (len %s, hash %s) to url %s', contentlen, md5hash, url)
now = datetime.datetime.now().isoformat()
params = {'cphash': md5hash}
m = requests_toolbelt.MultipartEncoder({
'md5': ('', md5hash),
'modified': ('', now),
'created': ('', now),
'file': (os.path.basename(url), fileobject, 'application/octet-stream'),
})
headers = {'JMd5':md5hash,
'JCreated': now,
'JModified': now,
'X-Jfs-DeviceName': 'Jotta',
'JSize': contentlen,
'jx_csid': '',
'jx_lisence': '',
'content-type': m.content_type,
}
fileobject.seek(0) # rewind read index for requests.post
files = {'md5': ('', md5hash),
'modified': ('', now),
'created': ('', now),
'file': (os.path.basename(url), fileobject, 'application/octet-stream')}
return self.post(url, None, files=files, params=params, extra_headers=headers, upload_callback=upload_callback)
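# Hedged usage sketch: upload a local file under the Jotta/Sync mountpoint
# (path and filename are illustrative):
#   with open('report.pdf', 'rb') as f:
#       jfs.up('/Jotta/Sync/report.pdf', f)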
def new_device(self, name, type):
"""Create a new (backup) device on jottacloud. Types can be one of
['workstation', 'imac', 'laptop', 'macbook', 'ipad', 'android', 'iphone', 'windows_phone']
"""
# at least the android client also includes a "cid" which is derived from the unique device id
# and encrypted with a public key in the apk. The field appears to be optional
url = posixpath.join(self.rootpath, name)
r = self.post(url, {'type': type})
return r
# property overloading
@property
def devices(self):
'Return a list of configured devices'
if self.fs is None:
return []
return [JFSDevice(d, self, parentpath=self.rootpath) for d in self.fs.devices.iterchildren()]
@property
def locked(self):
'return bool'
return bool(self.fs.locked) if self.fs is not None else None
@property
def read_locked(self):
'return bool'
return bool(self.fs['read-locked']) if self.fs is not None else None
@property
def write_locked(self):
'return bool'
return bool(self.fs['write-locked']) if self.fs is not None else None
@property
def capacity(self):
'Return int of storage capacity in bytes. A value of -1 means "unlimited"'
return int(self.fs.capacity) if self.fs is not None else 0
@property
def usage(self):
'Return int of storage usage in bytes'
return int(self.fs.usage) if self.fs is not None else 0
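# Hedged end-to-end sketch (illustrative, not from the original source):
# authenticate, then inspect the account; assumes get_auth_info() can
# resolve credentials on its own when no auth tuple is passed:
#   jfs = JFS()
#   print(jfs.capacity, jfs.usage)
#   for dev in jfs.devices:
#       print(dev.name, dev.type, dev.size)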
|
thusoy/jottalib
|
src/jottalib/JFS.py
|
Python
|
gpl-3.0
| 38,435
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beekeepers', '0002_add_beekeepers_survey_model'),
]
operations = [
migrations.AddField(
model_name='apiary',
name='deleted_at',
field=models.DateTimeField(help_text='When was the Apiary deleted', null=True),
),
]
|
project-icp/bee-pollinator-app
|
src/icp/apps/beekeepers/migrations/0003_apiary_deleted.py
|
Python
|
apache-2.0
| 457
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_policy import policy
from sahara.common.policies import base
job_types_policies = [
policy.DocumentedRuleDefault(
name=base.DATA_PROCESSING_JOB_TYPE % 'list',
check_str=base.UNPROTECTED,
description='List job types.',
operations=[{'path': '/v2/job-types',
'method': 'GET'}]),
]
def list_rules():
return job_types_policies
|
openstack/sahara
|
sahara/common/policies/job_type.py
|
Python
|
apache-2.0
| 944
|
from dataccess import psget
|
hoidn/LCLS
|
dataccess/tests/test_psget.py
|
Python
|
gpl-3.0
| 30
|
from __future__ import with_statement
import importlib
from fabric.api import *
from fabric.contrib.console import confirm
from .load_config import load_config
from .machine import install_machine
from .geoserver import deploy_geoserver
from .api import deploy_api
@task
def deploy():
load_config()
install_machine()
deploy_geoserver()
deploy_api()
|
openmaraude/fab_taxi
|
fabfile.py
|
Python
|
mit
| 368
|
import json
import os
from django.core.urlresolvers import reverse
from django.db import connections
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from healthcheck.contrib.django.status_endpoint import views
class StatusEndpointViewsTestCase(TestCase):
urls = 'healthcheck.contrib.django.status_endpoint.urls'
def setUp(self):
self.factory = RequestFactory()
@override_settings(
STATUS_CHECK_DBS=True,
STATUS_CHECK_FILES=('/etc/quiesce',)
)
def test_default_checks(self):
request = self.factory.get(reverse(views.status))
response = views.status(request)
self.assertEqual(response.status_code, 200)
@override_settings(
STATUS_CHECK_DBS=True,
STATUS_CHECK_FILES=()
)
def test_dont_check_files(self):
request = self.factory.get(reverse(views.status))
response = views.status(request)
response_json = json.loads(response.content.decode())
self.assertTrue(
"quiesce file doesn't exist" not in response_json)
self.assertTrue(
'Django Databases Health Check' in response_json)
db_names = response_json['Django Databases Health Check']['details']
self.assertTrue(
all(connection.alias in db_names
for connection in connections.all()))
self.assertEqual(response.status_code, 200)
@override_settings(
STATUS_CHECK_DBS=False,
STATUS_CHECK_FILES=()
)
def test_no_checks_raises_200(self):
request = self.factory.get(reverse(views.status))
response = views.status(request)
response = {
'content': json.loads(response.content.decode()),
'status': response.status_code,
}
expected_response = {
'content': 'There were no checks.',
'status': 200,
}
self.assertEqual(response, expected_response)
@override_settings(
STATUS_CHECK_DBS=False,
STATUS_CHECK_FILES=('/usr/bin/env',)
)
def test_failed_check(self):
request = self.factory.get(reverse(views.status))
response = views.status(request)
response = {
'content': json.loads(response.content.decode()),
'status': response.status_code,
}
expected_response = {
'content': {
"quiesce file doesn't exist": {
'details': {
'/usr/bin/env': 'FILE EXISTS'
},
'status': 'FAILED'
}
},
'status': 500,
}
self.assertEqual(response, expected_response)
|
yola/healthcheck
|
healthcheck/contrib/django/status_endpoint/tests/test_views.py
|
Python
|
mit
| 2,769
|
#
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
from shutil import rmtree
import tempfile
from xml.dom import minidom
import ethtool
from vdsm import netinfo
from monkeypatch import MonkeyPatch, MonkeyPatchScope
from testrunner import VdsmTestCase as TestCaseBase
# speeds defined in ethtool
ETHTOOL_SPEEDS = set([10, 100, 1000, 2500, 10000])
class TestNetinfo(TestCaseBase):
def testNetmaskConversions(self):
path = os.path.join(os.path.dirname(__file__), "netmaskconversions")
with open(path) as netmaskFile:
for line in netmaskFile:
if line.startswith('#'):
continue
bitmask, address = [value.strip() for value in line.split()]
self.assertEqual(netinfo.prefix2netmask(int(bitmask)),
address)
self.assertRaises(ValueError, netinfo.prefix2netmask, -1)
self.assertRaises(ValueError, netinfo.prefix2netmask, 33)
def testSpeedInvalidNic(self):
nicName = 'DUMMYNICDEVNAME'
self.assertTrue(nicName not in netinfo.nics())
s = netinfo.speed(nicName)
self.assertEqual(s, 0)
def testSpeedInRange(self):
for d in netinfo.nics():
s = netinfo.speed(d)
self.assertFalse(s < 0)
self.assertTrue(s in ETHTOOL_SPEEDS or s == 0)
def testIntToAddress(self):
num = [0, 1, 16777344, 16777408, 4294967295]
ip = ["0.0.0.0", "1.0.0.0", "128.0.0.1",
"192.0.0.1", "255.255.255.255"]
for n, addr in zip(num, ip):
self.assertEqual(addr, netinfo.intToAddress(n))
def testIPv6StrToAddress(self):
inputs = [
'00000000000000000000000000000000',
'00000000000000000000000000000001',
'20010db8000000000001000000000002',
'20010db8aaaabbbbccccddddeeeeffff',
'fe80000000000000be305bbffec58446']
ip = [
'::',
'::1',
'2001:db8::1:0:0:2',
'2001:db8:aaaa:bbbb:cccc:dddd:eeee:ffff',
'fe80::be30:5bbf:fec5:8446']
for s, addr in zip(inputs, ip):
self.assertEqual(addr, netinfo.ipv6StrToAddress(s))
@MonkeyPatch(netinfo, 'networks', lambda: {'fake': {'bridged': True}})
def testGetNonExistantBridgeInfo(self):
# Getting info of a non-existent bridge should not raise an exception,
# just log a traceback. If it raises an exception, the test will fail,
# as it should.
netinfo.get()
def testMatchNicName(self):
self.assertTrue(netinfo._match_name('test1', ['test0', 'test1']))
def testIPv4toMapped(self):
self.assertEqual('::ffff:127.0.0.1', netinfo.IPv4toMapped('127.0.0.1'))
def testGetIfaceByIP(self):
for dev in ethtool.get_interfaces_info(ethtool.get_active_devices()):
ipaddrs = map(
lambda etherinfo_ipv6addr: etherinfo_ipv6addr.address,
dev.get_ipv6_addresses())
ipaddrs.append(dev.ipv4_address)
for ip in ipaddrs:
self.assertEqual(dev.device, netinfo.getIfaceByIP(ip))
def _dev_dirs_setup(self, dir_fixture):
"""
Creates a test fixture which is a dir structure:
em, me, fake, fake0 - devices that should be managed by vdsm.
hid, hideous - not managed, being hidden nics.
jbond - not managed, being a hidden bond.
me0, me1 - not managed, being nics enslaved to the jbond hidden bond.
/tmp/.../em/device
/tmp/.../me/device
/tmp/.../fake0
/tmp/.../fake
/tmp/.../hid/device
/tmp/.../hideous/device
/tmp/.../me0/device
/tmp/.../me1/device
Returns the containing dir.
"""
dev_dirs = [os.path.join(dir_fixture, dev) for dev in
('em/device', 'me/device', 'fake', 'fake0',
'hid/device', 'hideous/device',
'me0/device', 'me1/device')]
for dev_dir in dev_dirs:
os.makedirs(dev_dir)
bonding_path = os.path.join(dir_fixture, 'jbond/bonding')
os.makedirs(bonding_path)
with open(os.path.join(bonding_path, 'slaves'), 'w') as f:
f.write('me0 me1')
return dir_fixture
def _config_setup(self):
"""
Returns an instance of a config stub.
With patterns:
* hid* for hidden nics.
* fake* for fake nics.
* jb* for hidden bonds.
"""
class Config(object):
def get(self, unused_vars, key):
if key == 'hidden_nics':
return 'hid*'
elif key == 'fake_nics':
return 'fake*'
else:
return 'jb*'
return Config()
def testNics(self):
temp_dir = tempfile.mkdtemp()
with MonkeyPatchScope([(netinfo, 'BONDING_SLAVES',
temp_dir + '/%s/bonding/slaves'),
(netinfo, 'NET_PATH',
self._dev_dirs_setup(temp_dir)),
(netinfo, 'config', self._config_setup())]):
try:
self.assertEqual(set(netinfo.nics()),
set(['em', 'me', 'fake', 'fake0']))
finally:
rmtree(temp_dir)
def testGetBandwidthQos(self):
notEmptyDoc = minidom.parseString("""<bandwidth>
<inbound average='4500' burst='5400' />
<outbound average='4500' burst='5400' peak='101' />
</bandwidth>""")
expectedQosNotEmpty = netinfo._Qos(inbound={'average': '4500',
'burst': '5400',
'peak': ''},
outbound={'average': '4500',
'burst': '5400',
'peak': '101'})
emptyDoc = minidom.parseString("<whatever></whatever>")
self.assertEqual(expectedQosNotEmpty,
netinfo._parseBandwidthQos(notEmptyDoc))
self.assertEqual(netinfo._Qos('', ''),
netinfo._parseBandwidthQos(emptyDoc))
|
edwardbadboy/vdsm-ubuntu
|
tests/netinfoTests.py
|
Python
|
gpl-2.0
| 7,176
|
# coding=utf-8
from abc import ABCMeta, abstractmethod
from typing import Optional
from weakref import ref
from logging import getLogger
from ultros.core.networks.base.connectors import base as base_connector
from ultros.core.networks.base.networks import base as base_network
__author__ = "Gareth Coles"
class BaseServer(metaclass=ABCMeta):
def __init__(self, name: str, network: "base_network.BaseNetwork"):
self.name = name
self._network = ref(network)
self.logger = getLogger(self.name) # TODO: Logging
@property
def network(self) -> "base_network.BaseNetwork":
return self._network()
@abstractmethod
async def connector_connected(self, connector: "base_connector.BaseConnector"):
pass
@abstractmethod
async def connector_disconnected(self, connector: "base_connector.BaseConnector",
exc: Optional[Exception]):
pass
|
UltrosBot/Ultros3K
|
src/ultros/core/networks/base/servers/base.py
|
Python
|
artistic-2.0
| 942
|
from .list_item import SimpleItem
|
d2emon/generator-pack
|
src/factory/__init__.py
|
Python
|
gpl-3.0
| 34
|
# -*- coding: utf-8 -*-
"""
Integration tests for submitting problem responses and getting grades.
"""
import json
import os
from textwrap import dedent
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import (
OptionResponseXMLFactory, CustomResponseXMLFactory, SchematicResponseXMLFactory,
CodeResponseXMLFactory,
)
from courseware import grades
from courseware.models import StudentModule, StudentModuleHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from student.tests.factories import UserFactory
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from openedx.core.djangoapps.credit.api import (
set_credit_requirements, get_credit_requirement_status
)
from openedx.core.djangoapps.credit.models import CreditCourse, CreditProvider
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from openedx.core.djangoapps.grading_policy.utils import MaxScoresCache
class TestSubmittingProblems(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Check that a course gets graded properly.
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
def setUp(self):
super(TestSubmittingProblems, self).setUp(create_user=False)
# Create course
self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
assert self.course, "Couldn't load course %r" % self.COURSE_NAME
# create a test student
self.student = 'view@test.com'
self.password = 'foo'
self.create_account('u1', self.student, self.password)
self.activate_user(self.student)
self.enroll(self.course)
self.student_user = User.objects.get(email=self.student)
self.factory = RequestFactory()
def refresh_course(self):
"""
Re-fetch the course from the database so that the object being dealt with has everything added to it.
"""
self.course = self.store.get_course(self.course.id)
def problem_location(self, problem_url_name):
"""
Returns the url of the problem given the problem's name
"""
return self.course.id.make_usage_key('problem', problem_url_name)
def modx_url(self, problem_location, dispatch):
"""
Return the url needed for the desired action.
problem_location: location of the problem on which we want some action
dispatch: the action string that gets passed to the view as a kwarg
example: 'check_problem' for having responses processed
"""
return reverse(
'xblock_handler',
kwargs={
'course_id': self.course.id.to_deprecated_string(),
'usage_id': quote_slashes(problem_location.to_deprecated_string()),
'handler': 'xmodule_handler',
'suffix': dispatch,
}
)
def submit_question_answer(self, problem_url_name, responses):
"""
Submit answers to a question.
Responses is a dict mapping problem ids to answers:
{'2_1': 'Correct', '2_2': 'Incorrect'}
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_check')
answer_key_prefix = 'input_{}_'.format(problem_location.html_id())
# format the response dictionary to be sent in the post request by adding the above prefix to each key
response_dict = {(answer_key_prefix + k): v for k, v in responses.items()}
resp = self.client.post(modx_url, response_dict)
return resp
def look_at_question(self, problem_url_name):
"""
Create state for a problem, but don't answer it
"""
location = self.problem_location(problem_url_name)
modx_url = self.modx_url(location, "problem_get")
resp = self.client.get(modx_url)
return resp
def reset_question_answer(self, problem_url_name):
"""
Reset specified problem for current user.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_reset')
resp = self.client.post(modx_url)
return resp
def show_question_answer(self, problem_url_name):
"""
Shows the answer to the current student.
"""
problem_location = self.problem_location(problem_url_name)
modx_url = self.modx_url(problem_location, 'problem_show')
resp = self.client.post(modx_url)
return resp
def add_dropdown_to_section(self, section_location, name, num_inputs=2):
"""
Create and return a dropdown problem.
section_location: location object of section in which to create the problem
(problems must live in a section to be graded properly)
name: string name of the problem
num_inputs: the number of input fields to create in the problem
"""
prob_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=num_inputs,
weight=num_inputs,
options=['Correct', 'Incorrect', u'ⓤⓝⓘⓒⓞⓓⓔ'],
correct_option='Correct'
)
problem = ItemFactory.create(
parent_location=section_location,
category='problem',
data=prob_xml,
metadata={'rerandomize': 'always'},
display_name=name
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
return problem
def add_graded_section_to_course(self, name, section_format='Homework', late=False, reset=False, showanswer=False):
"""
Creates a graded homework section within a chapter and returns the section.
"""
# if we don't already have a chapter create a new one
if not hasattr(self, 'chapter'):
self.chapter = ItemFactory.create(
parent_location=self.course.location,
category='chapter'
)
if late:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format, 'due': '2013-05-20T23:30'}
)
elif reset:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
rerandomize='always',
metadata={
'graded': True,
'format': section_format,
}
)
elif showanswer:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
showanswer='never',
metadata={
'graded': True,
'format': section_format,
}
)
else:
section = ItemFactory.create(
parent_location=self.chapter.location,
display_name=name,
category='sequential',
metadata={'graded': True, 'format': section_format}
)
# now that we've added the problem and section to the course
# we fetch the course from the database so the object we are
# dealing with has these additions
self.refresh_course()
return section
def add_grading_policy(self, grading_policy):
"""
Add a grading policy to the course.
"""
self.course.grading_policy = grading_policy
self.update_course(self.course, self.student_user.id)
self.refresh_course()
def get_grade_summary(self):
"""
Calls grades.grade for the current user and course.
The keywords for the returned object are
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
"""
fake_request = self.factory.get(
reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
)
fake_request.user = self.student_user
return grades.grade(self.student_user, fake_request, self.course)
def get_progress_summary(self):
"""
Return progress summary structure for current user and course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
"""
fake_request = self.factory.get(
reverse('progress', kwargs={'course_id': self.course.id.to_deprecated_string()})
)
progress_summary = grades.progress_summary(
self.student_user, fake_request, self.course
)
return progress_summary
def check_grade_percent(self, percent):
"""
Assert that percent grade is as expected.
"""
grade_summary = self.get_grade_summary()
self.assertEqual(grade_summary['percent'], percent)
def earned_hw_scores(self):
"""
Global scores, each Score is a Problem Set.
Returns list of scores: [<points on hw_1>, <points on hw_2>, ..., <points on hw_n>]
"""
return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]
def score_for_hw(self, hw_url_name):
"""
Returns the list of scores for the given homework url:
[<points on problem_1>, <points on problem_2>, ..., <points on problem_n>]
"""
# list of grade summaries for each section
sections_list = []
for chapter in self.get_progress_summary():
sections_list.extend(chapter['sections'])
# get the first section that matches the url (there should only be one)
hw_section = next(section for section in sections_list if section.get('url_name') == hw_url_name)
return [s.earned for s in hw_section['scores']]
@attr('shard_1')
class TestCourseGrader(TestSubmittingProblems):
"""
Suite of tests for the course grader.
"""
def basic_setup(self, late=False, reset=False, showanswer=False):
"""
Set up a simple course for testing basic grading functionality.
"""
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}],
"GRADE_CUTOFFS": {
'A': .9,
'B': .33
}
}
self.add_grading_policy(grading_policy)
# set up a simple course with four problems
self.homework = self.add_graded_section_to_course('homework', late=late, reset=reset, showanswer=showanswer)
self.add_dropdown_to_section(self.homework.location, 'p1', 1)
self.add_dropdown_to_section(self.homework.location, 'p2', 1)
self.add_dropdown_to_section(self.homework.location, 'p3', 1)
self.refresh_course()
def weighted_setup(self):
"""
Set up a simple course for testing weighted grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 0.25
}, {
"type": "Final",
"name": "Final Section",
"short_label": "Final",
"weight": 0.75
}
]
}
self.add_grading_policy(grading_policy)
# set up a structure of 1 homework and 1 final
self.homework = self.add_graded_section_to_course('homework')
self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')
self.final = self.add_graded_section_to_course('Final Section', 'Final')
self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')
def dropping_setup(self):
"""
Set up a simple course for testing the dropping grading functionality.
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 3,
"drop_count": 1,
"short_label": "HW",
"weight": 1
}
]
}
self.add_grading_policy(grading_policy)
# Set up a course structure that just consists of 3 homeworks.
# Since the grading policy drops 1 entire homework, each problem is worth 25%
# names for the problem in the homeworks
self.hw1_names = ['h1p1', 'h1p2']
self.hw2_names = ['h2p1', 'h2p2']
self.hw3_names = ['h3p1', 'h3p2']
self.homework1 = self.add_graded_section_to_course('homework1')
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)
self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)
self.homework2 = self.add_graded_section_to_course('homework2')
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)
self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)
self.homework3 = self.add_graded_section_to_course('homework3')
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)
self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)
def test_submission_late(self):
"""Test problem for due date in the past"""
self.basic_setup(late=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_reset(self):
"""Test problem ProcessingErrors due to resets"""
self.basic_setup(reset=True)
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
# submit a second time to draw NotFoundError
resp = self.submit_question_answer('p1', {'2_1': 'Correct'})
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_submission_show_answer(self):
"""Test problem for ProcessingErrors due to showing answer"""
self.basic_setup(showanswer=True)
resp = self.show_question_answer('p1')
self.assertEqual(resp.status_code, 200)
err_msg = (
"The state of this problem has changed since you loaded this page. "
"Please refresh your page."
)
self.assertEqual(json.loads(resp.content).get("success"), err_msg)
def test_show_answer_doesnt_write_to_csm(self):
self.basic_setup()
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the state entry for that problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
# count how many state history entries there are
baseline = StudentModuleHistory.objects.filter(
student_module=student_module
)
baseline_count = baseline.count()
self.assertEqual(baseline_count, 3)
# now click "show answer"
self.show_question_answer('p1')
# check that we don't have more state history entries
csmh = StudentModuleHistory.objects.filter(
student_module=student_module
)
current_count = csmh.count()
self.assertEqual(current_count, 3)
def test_grade_with_max_score_cache(self):
"""
Tests that the max score cache is populated after a grading run
and that the results of grading runs before and after the cache
warms are the same.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.look_at_question('p2')
self.assertTrue(
StudentModule.objects.filter(
module_state_key=self.problem_location('p2')
).exists()
)
location_to_cache = unicode(self.problem_location('p2'))
max_scores_cache = MaxScoresCache.create_for_course(self.course)
# problem isn't in the cache
max_scores_cache.fetch_from_remote([location_to_cache])
self.assertIsNone(max_scores_cache.get(location_to_cache))
self.check_grade_percent(0.33)
# problem is in the cache
max_scores_cache.fetch_from_remote([location_to_cache])
self.assertIsNotNone(max_scores_cache.get(location_to_cache))
self.check_grade_percent(0.33)
def test_none_grade(self):
"""
Check grade is 0 to begin with.
"""
self.basic_setup()
self.check_grade_percent(0)
self.assertEqual(self.get_grade_summary()['grade'], None)
def test_b_grade_exact(self):
"""
Check that at exactly the cutoff, the grade is B.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
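# 1 of 3 equally weighted problems correct -> 1/3, rounded to 0.33,
# which is exactly the 'B' cutoff configured in basic_setup()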
self.check_grade_percent(0.33)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_MAX_SCORE_CACHE": False})
def test_grade_no_max_score_cache(self):
"""
Tests grading when the max score cache is disabled
"""
self.test_b_grade_exact()
def test_b_grade_above(self):
"""
Check grade between cutoffs.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
def test_a_grade(self):
"""
Check that 100 percent completion gets an A
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
def test_wrong_answers(self):
"""
Check that answering incorrectly is graded properly.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
def test_submissions_api_overrides_scores(self):
"""
Check that answering incorrectly is graded properly.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
self.check_grade_percent(0.67)
self.assertEqual(self.get_grade_summary()['grade'], 'B')
# But now we mock out a get_scores call, and watch as it overrides the
# score read from StudentModule and our student gets an A instead.
with patch('submissions.api.get_scores') as mock_get_scores:
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.check_grade_percent(1.0)
self.assertEqual(self.get_grade_summary()['grade'], 'A')
def test_submissions_api_anonymous_student_id(self):
"""
Check that the submissions API is sent an anonymous student ID.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
self.submit_question_answer('p3', {'2_1': 'Incorrect'})
with patch('submissions.api.get_scores') as mock_get_scores:
mock_get_scores.return_value = {
self.problem_location('p3').to_deprecated_string(): (1, 1)
}
self.get_grade_summary()
# Verify that the submissions API was sent an anonymized student ID
mock_get_scores.assert_called_with(
self.course.id.to_deprecated_string(),
anonymous_id_for_user(self.student_user, self.course.id)
)
def test_weighted_homework(self):
"""
Test that the homework section has proper weight.
"""
self.weighted_setup()
# Get both parts correct
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.25)
self.assertEqual(self.earned_hw_scores(), [2.0]) # Order matters
self.assertEqual(self.score_for_hw('homework'), [2.0])
def test_weighted_exam(self):
"""
Test that the exam section has the proper weight.
"""
self.weighted_setup()
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(0.75)
def test_weighted_total(self):
"""
Test that the weighted total adds to 100.
"""
self.weighted_setup()
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})
self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})
self.check_grade_percent(1.0)
def dropping_homework_stage1(self):
"""
Get half the first homework correct and all of the second
"""
self.submit_question_answer(self.hw1_names[0], {'2_1': 'Correct'})
self.submit_question_answer(self.hw1_names[1], {'2_1': 'Incorrect'})
for name in self.hw2_names:
self.submit_question_answer(name, {'2_1': 'Correct'})
def test_dropping_grades_normally(self):
"""
Test that the dropping policy does not change things before it should.
"""
self.dropping_setup()
self.dropping_homework_stage1()
self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0]) # Order matters
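# with drop_count=1 the lowest homework (homework3: 0/2) is dropped and
# the remaining two average to (0.5 + 1.0) / 2 = 0.75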
self.check_grade_percent(0.75)
def test_dropping_nochange(self):
"""
Tests that grade does not change when making the global homework grade minimum not unique.
"""
self.dropping_setup()
self.dropping_homework_stage1()
self.submit_question_answer(self.hw3_names[0], {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])
self.assertEqual(self.score_for_hw('homework3'), [1.0, 0.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 1.0]) # Order matters
self.check_grade_percent(0.75)
def test_dropping_all_correct(self):
"""
Test that the lowest is dropped for a perfect score.
"""
self.dropping_setup()
self.dropping_homework_stage1()
for name in self.hw3_names:
self.submit_question_answer(name, {'2_1': 'Correct'})
self.check_grade_percent(1.0)
self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 2.0]) # Order matters
self.assertEqual(self.score_for_hw('homework3'), [1.0, 1.0])
def test_min_grade_credit_requirements_status(self):
"""
Test for credit course. If user passes minimum grade requirement then
status will be updated as satisfied in requirement status table.
"""
self.basic_setup()
self.submit_question_answer('p1', {'2_1': 'Correct'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
# Enable the course for credit
credit_course = CreditCourse.objects.create(
course_key=self.course.id,
enabled=True,
)
# Configure a credit provider for the course
CreditProvider.objects.create(
provider_id="ASU",
enable_integration=True,
provider_url="https://credit.example.com/request",
)
requirements = [{
"namespace": "grade",
"name": "grade",
"display_name": "Grade",
"criteria": {"min_grade": 0.52},
}]
# Add a single credit requirement (final grade)
set_credit_requirements(self.course.id, requirements)
self.get_grade_summary()
req_status = get_credit_requirement_status(self.course.id, self.student_user.username, 'grade', 'grade')
self.assertEqual(req_status[0]["status"], 'satisfied')
@attr('shard_1')
class ProblemWithUploadedFilesTest(TestSubmittingProblems):
"""Tests of problems with uploaded files."""
def setUp(self):
super(ProblemWithUploadedFilesTest, self).setUp()
self.section = self.add_graded_section_to_course('section')
def problem_setup(self, name, files):
"""
Create a CodeResponse problem with files to upload.
"""
xmldata = CodeResponseXMLFactory().build_xml(
allowed_files=files, required_files=files,
)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
display_name=name,
data=xmldata
)
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def test_three_files(self):
# Open the test files, and arrange to close them later.
filenames = "prog1.py prog2.py prog3.py"
fileobjs = [
open(os.path.join(settings.COMMON_TEST_DATA_ROOT, "capa", filename))
for filename in filenames.split()
]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
self.problem_setup("the_problem", filenames)
with patch('courseware.module_render.XQUEUE_INTERFACE.session') as mock_session:
resp = self.submit_question_answer("the_problem", {'2_1': fileobjs})
self.assertEqual(resp.status_code, 200)
json_resp = json.loads(resp.content)
self.assertEqual(json_resp['success'], "incorrect")
# See how post got called.
name, args, kwargs = mock_session.mock_calls[0]
self.assertEqual(name, "post")
self.assertEqual(len(args), 1)
self.assertTrue(args[0].endswith("/submit/"))
self.assertItemsEqual(kwargs.keys(), ["files", "data"])
self.assertItemsEqual(kwargs['files'].keys(), filenames.split())
@attr('shard_1')
class TestPythonGradedResponse(TestSubmittingProblems):
"""
Check that we can submit a schematic and custom response, and it answers properly.
"""
SCHEMATIC_SCRIPT = dedent("""
# for a schematic response, submission[i] is the json representation
# of the diagram and analysis results for the i-th schematic tag
def get_tran(json,signal):
for element in json:
if element[0] == 'transient':
return element[1].get(signal,[])
return []
def get_value(at,output):
for (t,v) in output:
if at == t: return v
return None
output = get_tran(submission[0],'Z')
okay = True
# output should be 1, 1, 1, 1, 1, 0, 0, 0
if get_value(0.0000004, output) < 2.7: okay = False;
if get_value(0.0000009, output) < 2.7: okay = False;
if get_value(0.0000014, output) < 2.7: okay = False;
if get_value(0.0000019, output) < 2.7: okay = False;
if get_value(0.0000024, output) < 2.7: okay = False;
if get_value(0.0000029, output) > 0.25: okay = False;
if get_value(0.0000034, output) > 0.25: okay = False;
if get_value(0.0000039, output) > 0.25: okay = False;
correct = ['correct' if okay else 'incorrect']""").strip()
SCHEMATIC_CORRECT = json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 2.8],
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
SCHEMATIC_INCORRECT = json.dumps(
[['transient', {'Z': [
[0.0000004, 2.8],
[0.0000009, 0.0], # wrong.
[0.0000014, 2.8],
[0.0000019, 2.8],
[0.0000024, 2.8],
[0.0000029, 0.2],
[0.0000034, 0.2],
[0.0000039, 0.2]
]}]]
)
CUSTOM_RESPONSE_SCRIPT = dedent("""
def test_csv(expect, ans):
# Take out all spaces in expected answer
expect = [i.strip(' ') for i in str(expect).split(',')]
# Take out all spaces in student solution
ans = [i.strip(' ') for i in str(ans).split(',')]
def strip_q(x):
# Strip quotes around strings if students have entered them
stripped_ans = []
for item in x:
if item[0] == "'" and item[-1]=="'":
item = item.strip("'")
elif item[0] == '"' and item[-1] == '"':
item = item.strip('"')
stripped_ans.append(item)
return stripped_ans
return strip_q(expect) == strip_q(ans)""").strip()
CUSTOM_RESPONSE_CORRECT = "0, 1, 2, 3, 4, 5, 'Outside of loop', 6"
CUSTOM_RESPONSE_INCORRECT = "Reading my code I see. I hope you like it :)"
COMPUTED_ANSWER_SCRIPT = dedent("""
if submission[0] == "a shout in the street":
correct = ['correct']
else:
correct = ['incorrect']""").strip()
COMPUTED_ANSWER_CORRECT = "a shout in the street"
COMPUTED_ANSWER_INCORRECT = "because we never let them in"
def setUp(self):
super(TestPythonGradedResponse, self).setUp()
self.section = self.add_graded_section_to_course('section')
self.correct_responses = {}
self.incorrect_responses = {}
def schematic_setup(self, name):
"""
set up an example Circuit_Schematic_Builder problem
"""
script = self.SCHEMATIC_SCRIPT
xmldata = SchematicResponseXMLFactory().build_xml(answer=script)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='circuitschematic.yaml',
display_name=name,
data=xmldata
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = self.SCHEMATIC_CORRECT
self.incorrect_responses[name] = self.SCHEMATIC_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def custom_response_setup(self, name):
"""
set up an example custom response problem using a check function
"""
test_csv = self.CUSTOM_RESPONSE_SCRIPT
expect = self.CUSTOM_RESPONSE_CORRECT
cfn_problem_xml = CustomResponseXMLFactory().build_xml(script=test_csv, cfn='test_csv', expect=expect)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='customgrader.yaml',
data=cfn_problem_xml,
display_name=name
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = expect
self.incorrect_responses[name] = self.CUSTOM_RESPONSE_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def computed_answer_setup(self, name):
"""
set up an example problem using an answer script
"""
script = self.COMPUTED_ANSWER_SCRIPT
computed_xml = CustomResponseXMLFactory().build_xml(answer=script)
ItemFactory.create(
parent_location=self.section.location,
category='problem',
boilerplate='customgrader.yaml',
data=computed_xml,
display_name=name
)
# define the correct and incorrect responses to this problem
self.correct_responses[name] = self.COMPUTED_ANSWER_CORRECT
self.incorrect_responses[name] = self.COMPUTED_ANSWER_INCORRECT
# re-fetch the course from the database so the object is up to date
self.refresh_course()
def _check_correct(self, name):
"""
check that problem named "name" gets evaluated correctly correctly
"""
resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
def _check_incorrect(self, name):
"""
check that problem named "name" gets evaluated incorrectly correctly
"""
resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'incorrect')
def _check_ireset(self, name):
"""
Check that the problem can be reset
"""
# first, get the question wrong
resp = self.submit_question_answer(name, {'2_1': self.incorrect_responses[name]})
# reset the question
self.reset_question_answer(name)
# then get it right
resp = self.submit_question_answer(name, {'2_1': self.correct_responses[name]})
respdata = json.loads(resp.content)
self.assertEqual(respdata['success'], 'correct')
def test_schematic_correct(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_correct(name)
def test_schematic_incorrect(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_incorrect(name)
def test_schematic_reset(self):
name = "schematic_problem"
self.schematic_setup(name)
self._check_ireset(name)
def test_check_function_correct(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_correct(name)
def test_check_function_incorrect(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_incorrect(name)
def test_check_function_reset(self):
name = 'cfn_problem'
self.custom_response_setup(name)
self._check_ireset(name)
def test_computed_correct(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_correct(name)
def test_computed_incorrect(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_incorrect(name)
def test_computed_reset(self):
name = 'computed_answer'
self.computed_answer_setup(name)
self._check_ireset(name)
@attr('shard_1')
class TestAnswerDistributions(TestSubmittingProblems):
"""Check that we can pull answer distributions for problems."""
def setUp(self):
"""Set up a simple course with four problems."""
super(TestAnswerDistributions, self).setUp()
self.homework = self.add_graded_section_to_course('homework')
self.p1_html_id = self.add_dropdown_to_section(self.homework.location, 'p1', 1).location.html_id()
self.p2_html_id = self.add_dropdown_to_section(self.homework.location, 'p2', 1).location.html_id()
self.p3_html_id = self.add_dropdown_to_section(self.homework.location, 'p3', 1).location.html_id()
self.refresh_course()
def test_empty(self):
# Just make sure we can process this without errors.
empty_distribution = grades.answer_distributions(self.course.id)
self.assertFalse(empty_distribution) # should be empty
def test_one_student(self):
# Basic test to make sure we have simple behavior right for a student
# Throw in a non-ASCII answer
self.submit_question_answer('p1', {'2_1': u'ⓤⓝⓘⓒⓞⓓⓔ'})
self.submit_question_answer('p2', {'2_1': 'Correct'})
distributions = grades.answer_distributions(self.course.id)
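# Each key in the returned dict is a (url_name, display_name, answer_id)
# triple (url_name and display_name coincide here), mapped to an
# {answer: count} dict, as the assertion below illustrates.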
self.assertEqual(
distributions,
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
u'ⓤⓝⓘⓒⓞⓓⓔ': 1
},
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Correct': 1
}
}
)
def test_multiple_students(self):
# Our test class is based around making requests for a particular user,
# so we're going to cheat by creating another user and copying and
# modifying StudentModule entries to make them from other users. It's
# a little hacky, but it seemed the simplest way to do this.
self.submit_question_answer('p1', {'2_1': u'Correct'})
self.submit_question_answer('p2', {'2_1': u'Incorrect'})
self.submit_question_answer('p3', {'2_1': u'Correct'})
# Make the above submissions owned by user2
user2 = UserFactory.create()
problems = StudentModule.objects.filter(
course_id=self.course.id,
student=self.student_user
)
for problem in problems:
problem.student_id = user2.id
problem.save()
# Now make more submissions by our original user
self.submit_question_answer('p1', {'2_1': u'Correct'})
self.submit_question_answer('p2', {'2_1': u'Correct'})
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
'Correct': 2
},
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Correct': 1,
'Incorrect': 1
},
('p3', 'p3', '{}_2_1'.format(self.p3_html_id)): {
'Correct': 1
}
}
)
def test_other_data_types(self):
# We'll submit one problem, and then muck with the student_answers
# dict inside its state to try different data types (str, bool, int,
# float, None)
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the state entry for that problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
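# answer_distributions coerces each stored answer to its str() form, which
# is why the loop below asserts on str(val) for every data type.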
for val in ('Correct', True, False, 0, 0.0, 1, 1.0, None):
state = json.loads(student_module.state)
state["student_answers"]['{}_2_1'.format(self.p1_html_id)] = val
student_module.state = json.dumps(state)
student_module.save()
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p1', 'p1', '{}_2_1'.format(self.p1_html_id)): {
str(val): 1
},
}
)
def test_missing_content(self):
# If there's a StudentModule entry for content that no longer exists,
# we just quietly ignore it (because we can't display a meaningful url
# or name for it).
self.submit_question_answer('p1', {'2_1': 'Incorrect'})
# Now fetch the state entry for that problem and alter it so it points
# to a non-existent problem.
student_module = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
student_module.module_state_key = student_module.module_state_key.replace(
name=student_module.module_state_key.name + "_fake"
)
student_module.save()
# It should be empty (ignored)
empty_distribution = grades.answer_distributions(self.course.id)
self.assertFalse(empty_distribution) # should be empty
def test_broken_state(self):
# Missing or broken state for a problem should be skipped without
# causing the whole answer_distribution call to explode.
# Submit p1
self.submit_question_answer('p1', {'2_1': u'Correct'})
# Now fetch the StudentModule entry for p1 so we can corrupt its state
prb1 = StudentModule.objects.get(
course_id=self.course.id,
student=self.student_user
)
# Submit p2
self.submit_question_answer('p2', {'2_1': u'Incorrect'})
for new_p1_state in ('{"student_answers": {}}', "invalid json!", None):
prb1.state = new_p1_state
prb1.save()
# p1 won't show up, but p2 should still work
self.assertEqual(
grades.answer_distributions(self.course.id),
{
('p2', 'p2', '{}_2_1'.format(self.p2_html_id)): {
'Incorrect': 1
},
}
)
@attr('shard_1')
class TestConditionalContent(TestSubmittingProblems):
"""
Check that conditional content works correctly with grading.
"""
def setUp(self):
"""
Set up a simple course with a grading policy, a UserPartition, and 2 sections, both graded as "homework".
One section is pre-populated with a problem (with 2 inputs), visible to all students.
The second section is empty. Test cases should add conditional content to it.
"""
super(TestConditionalContent, self).setUp()
self.user_partition_group_0 = 0
self.user_partition_group_1 = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_0, 'alpha'),
Group(self.user_partition_group_1, 'beta')
]
)
self.course = CourseFactory.create(
display_name=self.COURSE_NAME,
number=self.COURSE_SLUG,
user_partitions=[self.partition]
)
grading_policy = {
"GRADER": [{
"type": "Homework",
"min_count": 2,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}]
}
self.add_grading_policy(grading_policy)
self.homework_all = self.add_graded_section_to_course('homework1')
self.p1_all_html_id = self.add_dropdown_to_section(self.homework_all.location, 'H1P1', 2).location.html_id()
self.homework_conditional = self.add_graded_section_to_course('homework2')
def split_setup(self, user_partition_group):
"""
Setup for tests using the split_test module. Creates a split_test instance as a child of self.homework_conditional
with 2 verticals in it, and assigns self.student_user to the specified user_partition_group.
The verticals are returned.
"""
vertical_0_url = self.course.id.make_usage_key("vertical", "split_test_vertical_0")
vertical_1_url = self.course.id.make_usage_key("vertical", "split_test_vertical_1")
group_id_to_child = {}
for index, url in enumerate([vertical_0_url, vertical_1_url]):
group_id_to_child[str(index)] = url
split_test = ItemFactory.create(
parent_location=self.homework_conditional.location,
category="split_test",
display_name="Split test",
user_partition_id='0',
group_id_to_child=group_id_to_child,
)
vertical_0 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 0 vertical",
location=vertical_0_url,
)
vertical_1 = ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 1 vertical",
location=vertical_1_url,
)
# Now add the student to the specified group.
UserCourseTagFactory(
user=self.student_user,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(user_partition_group)
)
return vertical_0, vertical_1
def split_different_problems_setup(self, user_partition_group):
"""
Setup for the case where the split test instance contains problems for each group
(so both groups do have graded content, though it is different).
Group 0 has 2 problems, worth 1 and 3 points respectively.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 1 point out of a possible 2 points in the section.
"""
vertical_0, vertical_1 = self.split_setup(user_partition_group)
# Group 0 will have 2 problems in the section, worth a total of 4 points.
self.add_dropdown_to_section(vertical_0.location, 'H2P1_GROUP0', 1).location.html_id()
self.add_dropdown_to_section(vertical_0.location, 'H2P2_GROUP0', 3).location.html_id()
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
# Submit answers for problem in Section 1, which is visible to all students.
self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Incorrect'})
def test_split_different_problems_group_0(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 0.
"""
self.split_different_problems_setup(self.user_partition_group_0)
self.submit_question_answer('H2P1_GROUP0', {'2_1': 'Correct'})
self.submit_question_answer('H2P2_GROUP0', {'2_1': 'Correct', '2_2': 'Incorrect', '2_3': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0, 2.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 3.0])
# Grade percent is .63. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = (1.0 + 2.0) / 4
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
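# Note on the .63 above: (0.5 + 0.75) / 2 == 0.625, and under Python 2's
# round() semantics (halves are rounded away from zero) round(0.625, 2)
# yields 0.63.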
def test_split_different_problems_group_1(self):
"""
Tests that users who see different problems in a split_test module instance are graded correctly.
This is the test case for a user in user partition group 1.
"""
self.split_different_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def split_one_group_no_problems_setup(self, user_partition_group):
"""
Setup for the case where the split test instance contains problems for only one group.
Group 0 has no problems.
Group 1 has 1 problem, worth 1 point.
This method also assigns self.student_user to the specified user_partition_group and
then submits answers for the problems in section 1, which are visible to all students.
The submitted answers give the student 2 points out of a possible 2 points in the section.
"""
[_, vertical_1] = self.split_setup(user_partition_group)
# Group 1 will have 1 problem in the section, worth a total of 1 point.
self.add_dropdown_to_section(vertical_1.location, 'H2P1_GROUP1', 1).location.html_id()
self.submit_question_answer('H1P1', {'2_1': 'Correct'})
def test_split_one_group_no_problems_group_0(self):
"""
Tests what happens when a given group has no problems in it (students receive 0 for that section).
"""
self.split_one_group_no_problems_setup(self.user_partition_group_0)
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [])
self.assertEqual(self.earned_hw_scores(), [1.0, 0.0])
# Grade percent is .25. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 0.0
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
def test_split_one_group_no_problems_group_1(self):
"""
Verifies students in the group that DOES have a problem receive a score for their problem.
"""
self.split_one_group_no_problems_setup(self.user_partition_group_1)
self.submit_question_answer('H2P1_GROUP1', {'2_1': 'Correct'})
self.assertEqual(self.score_for_hw('homework1'), [1.0])
self.assertEqual(self.score_for_hw('homework2'), [1.0])
self.assertEqual(self.earned_hw_scores(), [1.0, 1.0])
# Grade percent is .75. Here is the calculation.
homework_1_score = 1.0 / 2
homework_2_score = 1.0 / 1
self.check_grade_percent(round((homework_1_score + homework_2_score) / 2, 2))
| martynovp/edx-platform | lms/djangoapps/courseware/tests/test_submitting_problems.py | Python | agpl-3.0 | 51,827 |
# This file is part of RinohType, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
"""
Functions for formatting numbers:
* :func:`format_number`: Format a number according to a given style.
"""
from .paragraph import ParagraphBase, ParagraphStyle
from .style import Style
__all__ = ['NumberStyle', 'Label', 'NumberedParagraph',
'NUMBER', 'CHARACTER_LC', 'CHARACTER_UC', 'ROMAN_LC', 'ROMAN_UC',
'SYMBOL', 'format_number']
NUMBER = 'number'
CHARACTER_LC = 'character'
CHARACTER_UC = 'CHARACTER'
ROMAN_LC = 'roman'
ROMAN_UC = 'ROMAN'
SYMBOL = 'symbol'
def format_number(number, format):
"""Format `number` according the given `format`:
* :const:`NUMBER`: plain arabic number (1, 2, 3, ...)
* :const:`CHARACTER_LC`: lowercase letters (a, b, c, ..., aa, ab, ...)
* :const:`CHARACTER_UC`: uppercase letters (A, B, C, ..., AA, AB, ...)
* :const:`ROMAN_LC`: lowercase Roman (i, ii, iii, iv, v, vi, ...)
* :const:`ROMAN_UC`: uppercase Roman (I, II, III, IV, V, VI, ...)
"""
if format == NUMBER:
return str(number)
elif format == CHARACTER_LC:
string = ''
while number > 0:
number, ordinal = divmod(number, 26)
if ordinal == 0:
ordinal = 26
number -= 1
string = chr(ord('a') - 1 + ordinal) + string
return string
elif format == CHARACTER_UC:
return format_number(number, CHARACTER_LC).upper()
elif format == ROMAN_LC:
return romanize(number).lower()
elif format == ROMAN_UC:
return romanize(number)
elif format == SYMBOL:
return symbolize(number)
else:
raise ValueError("Unknown number format '{}'".format(format))
# romanize by Kay Schluehr - from http://billmill.org/python_roman.html
NUMERALS = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))
def romanize(number):
"""Convert `number` to a Roman numeral."""
roman = []
for numeral, value in NUMERALS:
times, number = divmod(number, value)
roman.append(times * numeral)
return ''.join(roman)
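# Example: romanize(1994) walks NUMERALS greedily, consuming 'M', 'CM',
# 'XC' and 'IV' in turn -> 'MCMXCIV'.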
SYMBOLS = ('*', '†', '‡', '§', '‖', '¶', '#')
def symbolize(number):
"""Convert `number` to a foot/endnote symbol."""
repeat, index = divmod(number - 1, len(SYMBOLS))
return SYMBOLS[index] * (1 + repeat)
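# Example: symbolize(1) -> '*', symbolize(7) -> '#', symbolize(9) -> '††'
# (once the 7 symbols are exhausted, the sequence repeats doubled).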
class LabelStyle(Style):
attributes = {'label_prefix': None,
'label_suffix': None,
'custom_label': False}
class Label(object):
def __init__(self, custom_label=None):
self.custom_label = custom_label
def format_label(self, label, document):
prefix = self.get_style('label_prefix', document) or ''
suffix = self.get_style('label_suffix', document) or ''
return prefix + label + suffix
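# Illustrative: with a style that defines label_prefix='(' and
# label_suffix=')', format_label('3', document) returns '(3)'; affixes left
# unset fall back to the empty string.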
class NumberStyle(LabelStyle):
attributes = {'number_format': NUMBER}
class NumberedParagraphStyle(ParagraphStyle, NumberStyle):
pass
class NumberedParagraph(ParagraphBase, Label):
style_class = NumberedParagraphStyle
def __init__(self, content, custom_label=None,
id=None, style=None, parent=None):
super().__init__(id=id, style=style, parent=parent)
Label.__init__(self, custom_label=custom_label)
self.content = content
@property
def referenceable(self):
raise NotImplementedError
def number(self, document):
target_id = self.referenceable.get_id(document)
formatted_number = document.get_reference(target_id, NUMBER)
if formatted_number:
return self.format_label(formatted_number, document)
else:
return ''
def text(self, document):
raise NotImplementedError
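# Imported at the end of the module, presumably to sidestep a circular import
# with .reference; note that this rebinds the NUMBER name defined above.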
from .reference import NUMBER
| beni55/rinohtype | rinoh/number.py | Python | agpl-3.0 | 4,052 |
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Product Variant Inactive",
"author": "Akretion,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/product-variant",
"license": "AGPL-3",
"category": "Product",
"version": "14.0.1.0.0",
"depends": ["stock"],
"data": [
"views/product_template_view.xml",
"views/product_variant_view.xml",
],
"demo": ["data/product.product.csv"],
}
| OCA/product-variant | product_variant_inactive/__manifest__.py | Python | agpl-3.0 | 485 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import re
import unittest
from django.contrib.admin import ModelAdmin
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.models import ADDITION, DELETION, LogEntry
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.utils import quote
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.auth import REDIRECT_FIELD_NAME, get_permission_codename
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core import mail
from django.core.checks import Error
from django.core.files import temp as tempfile
from django.core.urlresolvers import NoReverseMatch, resolve, reverse
from django.forms.utils import ErrorList
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.test import (
SimpleTestCase, TestCase, modify_settings, override_settings,
skipUnlessDBFeature,
)
from django.test.utils import override_script_prefix, patch_logger
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.cache import get_max_age
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.html import escape
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urljoin, urlparse
from . import customadmin
from .admin import CityAdmin, site, site2
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Answer, Article, BarAccount, Book, Category,
Chapter, ChapterXtra1, ChapterXtra2, Character, Child, Choice, City,
Collector, Color, Color2, ComplexSortedPerson, CoverLetter, CustomArticle,
CyclicOne, CyclicTwo, DooHickey, Employee, EmptyModel, ExternalSubscriber,
Fabric, FancyDoodad, FieldOverridePost, FilteredManager, FooAccount,
FoodDelivery, FunkyTag, Gallery, Grommet, Inquisition, Language,
MainPrepopulated, ModelWithStringPrimaryKey, OtherStory, Paper, Parent,
ParentWithDependentChildren, Person, Persona, Picture, Pizza, Plot,
PlotDetails, PluggableSearchPerson, Podcast, Post, PrePopulatedPost, Promo,
Question, Recommendation, Recommender, RelatedPrepopulated, Report,
Restaurant, RowLevelChangePermissionModel, SecretHideout, Section,
ShortMessage, Simple, State, Story, Subscriber, SuperSecretHideout,
SuperVillain, Telegram, TitleTranslation, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, Villain, Vodcast, Whatsit, Widget,
Worker, WorkHour,
)
ERROR_MESSAGE = "Please enter the correct username and password \
for a staff account. Note that both fields may be case-sensitive."
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls",
USE_I18N=True, USE_L10N=False, LANGUAGE_CODE='en')
class AdminViewBasicTestCase(TestCase):
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.color1 = Color.objects.create(value='Red', warm=True)
cls.color2 = Color.objects.create(value='Orange', warm=True)
cls.color3 = Color.objects.create(value='Blue', warm=False)
cls.color4 = Color.objects.create(value='Green', warm=False)
cls.fab1 = Fabric.objects.create(surface='x')
cls.fab2 = Fabric.objects.create(surface='y')
cls.fab3 = Fabric.objects.create(surface='plain')
cls.b1 = Book.objects.create(name='Book 1')
cls.b2 = Book.objects.create(name='Book 2')
cls.pro1 = Promo.objects.create(name='Promo 1', book=cls.b1)
cls.pro2 = Promo.objects.create(name='Promo 2', book=cls.b2)
cls.chap1 = Chapter.objects.create(title='Chapter 1', content='[ insert contents here ]', book=cls.b1)
cls.chap2 = Chapter.objects.create(title='Chapter 2', content='[ insert contents here ]', book=cls.b1)
cls.chap3 = Chapter.objects.create(title='Chapter 1', content='[ insert contents here ]', book=cls.b2)
cls.chap4 = Chapter.objects.create(title='Chapter 2', content='[ insert contents here ]', book=cls.b2)
cls.cx1 = ChapterXtra1.objects.create(chap=cls.chap1, xtra='ChapterXtra1 1')
cls.cx2 = ChapterXtra1.objects.create(chap=cls.chap3, xtra='ChapterXtra1 2')
# Post data for edit inline
cls.inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": cls.a1.pk,
# there is no title in the database; give one here or the formset will fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": cls.s1.pk,
"article_set-1-id": cls.a2.pk,
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": cls.a3.pk,
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
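# The *_FORMS keys above are the ManagementForm fields Django formsets
# require: 3 initial forms matching the existing articles plus 3 blank
# extras, for 6 total.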
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
formats.reset_format_cache()
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(response.content.index(force_bytes(text1)), response.content.index(force_bytes(text2)),
failing_msg)
class AdminViewBasicTest(AdminViewBasicTestCase):
def test_trailing_slash_required(self):
"""
If you leave off the trailing slash, the app should redirect and add it.
"""
add_url = reverse('admin:admin_views_article_add')
response = self.client.get(add_url[:-1])
self.assertRedirects(response, add_url, status_code=301)
def test_admin_static_template_tag(self):
"""
Test that admin_static.static is pointing to the collectstatic version
(as django.contrib.staticfiles is in installed apps).
"""
old_url = staticfiles_storage.base_url
staticfiles_storage.base_url = '/test/'
try:
self.assertEqual(static('path'), '/test/path')
finally:
staticfiles_storage.base_url = old_url
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:admin_views_section_add'))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
response = self.client.get(reverse('admin:admin_views_section_add'), {'name': 'My Section'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'value="My Section"',
msg_prefix="Couldn't find an input with the right value in the response")
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(reverse('admin:admin_views_section_change', args=(self.s1.pk,)))
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET_string_PK(self):
"""
Ensure GET on the change_view returns an HTTP 404 error (see #11191)
when passing a string as the PK argument for a model with an integer
PK field.
"""
response = self.client.get(reverse('admin:admin_views_section_change', args=('abc',)))
self.assertEqual(response.status_code, 404)
def test_basic_edit_GET_old_url_redirect(self):
"""
The change URL changed in Django 1.9, but the old one still redirects.
"""
response = self.client.get(
reverse('admin:admin_views_section_change', args=(self.s1.pk,)).replace('change/', '')
)
self.assertRedirects(response, reverse('admin:admin_views_section_change', args=(self.s1.pk,)))
def test_basic_inheritance_GET_string_PK(self):
"""
Ensure GET on the change_view of inherited models returns an HTTP 404
error (see #19951) when passing a string as the PK argument for a
model with an integer PK field.
"""
response = self.client.get(reverse('admin:admin_views_supervillain_change', args=('abc',)))
self.assertEqual(response.status_code, 404)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "Another Section",
# inline data
"article_set-TOTAL_FORMS": "3",
"article_set-INITIAL_FORMS": "0",
"article_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:admin_views_section_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_popup_add_POST(self):
"""
Ensure http response from a popup is properly escaped.
"""
post_data = {
'_popup': '1',
'title': 'title with a new\nline',
'content': 'some content',
'date_0': '2010-09-10',
'date_1': '14:55:39',
}
response = self.client.post(reverse('admin:admin_views_article_add'), post_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddRelatedObjectPopup')
self.assertContains(response, 'title with a new\\u000Aline')
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
response = self.client.post(reverse('admin:admin_views_section_change', args=(self.s1.pk,)), self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update({
'_saveasnew': 'Save+as+new',
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-3-section": "1",
"article_set-4-section": "1",
"article_set-5-section": "1",
})
response = self.client.post(reverse('admin:admin_views_section_change', args=(self.s1.pk,)), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as_delete_inline(self):
"""
Should be able to "Save as new" while also deleting an inline.
"""
post_data = self.inline_post_data.copy()
post_data.update({
'_saveasnew': 'Save+as+new',
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-2-DELETE": "1",
"article_set-3-section": "1",
})
response = self.client.post(reverse('admin:admin_views_section_change', args=(self.s1.pk,)), post_data)
self.assertEqual(response.status_code, 302)
# started with 3 articles, one was deleted.
self.assertEqual(Section.objects.latest('id').article_set.count(), 2)
def test_change_list_sorting_callable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get(reverse('admin:admin_views_article_changelist'), {'o': 2})
self.assertContentBefore(response, 'Oldest content', 'Middle content',
"Results of sorting on callable are out of order.")
self.assertContentBefore(response, 'Middle content', 'Newest content',
"Results of sorting on callable are out of order.")
def test_change_list_sorting_model(self):
"""
Ensure we can sort on a list_display field that is a Model method
(column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get(reverse('admin:admin_views_article_changelist'), {'o': '-3'})
self.assertContentBefore(response, 'Newest content', 'Middle content',
"Results of sorting on Model method are out of order.")
self.assertContentBefore(response, 'Middle content', 'Oldest content',
"Results of sorting on Model method are out of order.")
def test_change_list_sorting_model_admin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
(column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get(reverse('admin:admin_views_article_changelist'), {'o': '4'})
self.assertContentBefore(response, 'Oldest content', 'Middle content',
"Results of sorting on ModelAdmin method are out of order.")
self.assertContentBefore(response, 'Middle content', 'Newest content',
"Results of sorting on ModelAdmin method are out of order.")
def test_change_list_sorting_model_admin_reverse(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin
method in reverse order (i.e. admin_order_field uses the '-' prefix)
(column 6 is 'model_year_reverse' in ArticleAdmin)
"""
response = self.client.get(reverse('admin:admin_views_article_changelist'), {'o': '6'})
self.assertContentBefore(response, '2009', '2008',
"Results of sorting on ModelAdmin method are out of order.")
self.assertContentBefore(response, '2008', '2000',
"Results of sorting on ModelAdmin method are out of order.")
# Let's make sure the ordering is right and that we don't get a
# FieldError when we change to descending order
response = self.client.get(reverse('admin:admin_views_article_changelist'), {'o': '-6'})
self.assertContentBefore(response, '2000', '2008',
"Results of sorting on ModelAdmin method are out of order.")
self.assertContentBefore(response, '2008', '2009',
"Results of sorting on ModelAdmin method are out of order.")
def test_change_list_sorting_multiple(self):
p1 = Person.objects.create(name="Chris", gender=1, alive=True)
p2 = Person.objects.create(name="Chris", gender=2, alive=True)
p3 = Person.objects.create(name="Bob", gender=1, alive=True)
link1 = reverse('admin:admin_views_person_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_person_change', args=(p2.pk,))
link3 = reverse('admin:admin_views_person_change', args=(p3.pk,))
# Sort by name, gender
response = self.client.get(reverse('admin:admin_views_person_changelist'), {'o': '1.2'})
self.assertContentBefore(response, link3, link1)
self.assertContentBefore(response, link1, link2)
# Sort by gender descending, name
response = self.client.get(reverse('admin:admin_views_person_changelist'), {'o': '-2.1'})
self.assertContentBefore(response, link2, link3)
self.assertContentBefore(response, link3, link1)
def test_change_list_sorting_preserve_queryset_ordering(self):
"""
If no ordering is defined in `ModelAdmin.ordering` or in the query
string, then the underlying order of the queryset should not be
changed, even if it is defined in `ModelAdmin.get_queryset()`.
Refs #11868, #7309.
"""
p1 = Person.objects.create(name="Amy", gender=1, alive=True, age=80)
p2 = Person.objects.create(name="Bob", gender=1, alive=True, age=70)
p3 = Person.objects.create(name="Chris", gender=2, alive=False, age=60)
link1 = reverse('admin:admin_views_person_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_person_change', args=(p2.pk,))
link3 = reverse('admin:admin_views_person_change', args=(p3.pk,))
response = self.client.get(reverse('admin:admin_views_person_changelist'), {})
self.assertContentBefore(response, link3, link2)
self.assertContentBefore(response, link2, link1)
def test_change_list_sorting_model_meta(self):
# Test ordering on Model Meta is respected
l1 = Language.objects.create(iso='ur', name='Urdu')
l2 = Language.objects.create(iso='ar', name='Arabic')
link1 = reverse('admin:admin_views_language_change', args=(quote(l1.pk),))
link2 = reverse('admin:admin_views_language_change', args=(quote(l2.pk),))
response = self.client.get(reverse('admin:admin_views_language_changelist'), {})
self.assertContentBefore(response, link2, link1)
# Test we can override with query string
response = self.client.get(reverse('admin:admin_views_language_changelist'), {'o': '-1'})
self.assertContentBefore(response, link1, link2)
def test_change_list_sorting_override_model_admin(self):
# Test ordering on Model Admin is respected, and overrides Model Meta
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse('admin:admin_views_podcast_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_podcast_change', args=(p2.pk,))
response = self.client.get(reverse('admin:admin_views_podcast_changelist'), {})
self.assertContentBefore(response, link1, link2)
def test_multiple_sort_same_field(self):
# Check that we get the columns we expect if we have two columns
# that correspond to the same ordering field
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse('admin:admin_views_podcast_change', args=(quote(p1.pk),))
link2 = reverse('admin:admin_views_podcast_change', args=(quote(p2.pk),))
response = self.client.get(reverse('admin:admin_views_podcast_changelist'), {})
self.assertContentBefore(response, link1, link2)
p1 = ComplexSortedPerson.objects.create(name="Bob", age=10)
p2 = ComplexSortedPerson.objects.create(name="Amy", age=20)
link1 = reverse('admin:admin_views_complexsortedperson_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_complexsortedperson_change', args=(p2.pk,))
response = self.client.get(reverse('admin:admin_views_complexsortedperson_changelist'), {})
# Should have 5 columns (including action checkbox col)
self.assertContains(response, '<th scope="col"', count=5)
self.assertContains(response, 'Name')
self.assertContains(response, 'Colored name')
# Check order
self.assertContentBefore(response, 'Name', 'Colored name')
# Check sorting - should be by name
self.assertContentBefore(response, link2, link1)
def test_sort_indicators_admin_order(self):
"""
Ensures that the admin shows default sort indicators for all
kinds of 'ordering' fields: field names, method on the model
admin and model itself, and other callables. See #17252.
"""
models = [(AdminOrderedField, 'adminorderedfield'),
(AdminOrderedModelMethod, 'adminorderedmodelmethod'),
(AdminOrderedAdminMethod, 'adminorderedadminmethod'),
(AdminOrderedCallable, 'adminorderedcallable')]
for model, url in models:
model.objects.create(stuff='The Last Item', order=3)
model.objects.create(stuff='The First Item', order=1)
model.objects.create(stuff='The Middle Item', order=2)
response = self.client.get(reverse('admin:admin_views_%s_changelist' % url), {})
self.assertEqual(response.status_code, 200)
# Should have 3 columns including action checkbox col.
self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
# Check if the correct column was selected. 2 is the index of the
# 'order' column in the model admin's 'list_display' with 0 being
# the implicit 'action_checkbox' and 1 being the column 'stuff'.
self.assertEqual(response.context['cl'].get_ordering_field_columns(), {2: 'asc'})
# Check order of records.
self.assertContentBefore(response, 'The First Item', 'The Middle Item')
self.assertContentBefore(response, 'The Middle Item', 'The Last Item')
def test_limited_filter(self):
"""Ensure admin changelist filters do not contain objects excluded via limit_choices_to.
This also tests relation-spanning filters (e.g. 'color__value').
"""
response = self.client.get(reverse('admin:admin_views_thing_changelist'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<div id="changelist-filter">',
msg_prefix="Expected filter not found in changelist view")
self.assertNotContains(response, '<a href="?color__id__exact=3">Blue</a>',
msg_prefix="Changelist filter not correctly limited by limit_choices_to")
def test_relation_spanning_filters(self):
changelist_url = reverse('admin:admin_views_chapterxtra1_changelist')
response = self.client.get(changelist_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<div id="changelist-filter">')
filters = {
'chap__id__exact': dict(
values=[c.id for c in Chapter.objects.all()],
test=lambda obj, value: obj.chap.id == value),
'chap__title': dict(
values=[c.title for c in Chapter.objects.all()],
test=lambda obj, value: obj.chap.title == value),
'chap__book__id__exact': dict(
values=[b.id for b in Book.objects.all()],
test=lambda obj, value: obj.chap.book.id == value),
'chap__book__name': dict(
values=[b.name for b in Book.objects.all()],
test=lambda obj, value: obj.chap.book.name == value),
'chap__book__promo__id__exact': dict(
values=[p.id for p in Promo.objects.all()],
test=lambda obj, value: obj.chap.book.promo_set.filter(id=value).exists()),
'chap__book__promo__name': dict(
values=[p.name for p in Promo.objects.all()],
test=lambda obj, value: obj.chap.book.promo_set.filter(name=value).exists()),
}
for filter_path, params in filters.items():
for value in params['values']:
query_string = urlencode({filter_path: value})
# ensure filter link exists
self.assertContains(response, '<a href="?%s">' % query_string)
# ensure link works
filtered_response = self.client.get('%s?%s' % (changelist_url, query_string))
self.assertEqual(filtered_response.status_code, 200)
# ensure changelist contains only valid objects
for obj in filtered_response.context['cl'].queryset.all():
self.assertTrue(params['test'](obj, value))
def test_incorrect_lookup_parameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
changelist_url = reverse('admin:admin_views_thing_changelist')
response = self.client.get(changelist_url, {'notarealfield': '5'})
self.assertRedirects(response, '%s?e=1' % changelist_url)
# Spanning relationships through a nonexistent related object (Refs #16716)
response = self.client.get(changelist_url, {'notarealfield__whatever': '5'})
self.assertRedirects(response, '%s?e=1' % changelist_url)
response = self.client.get(changelist_url, {'color__id__exact': 'StringNotInteger!'})
self.assertRedirects(response, '%s?e=1' % changelist_url)
# Regression test for #18530
response = self.client.get(changelist_url, {'pub_date__gte': 'foo'})
self.assertRedirects(response, '%s?e=1' % changelist_url)
def test_isnull_lookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(title="I Could Go Anywhere", content="Versatile", date=datetime.datetime.now())
changelist_url = reverse('admin:admin_views_article_changelist')
response = self.client.get(changelist_url)
self.assertContains(response, '4 articles')
response = self.client.get(changelist_url, {'section__isnull': 'false'})
self.assertContains(response, '3 articles')
response = self.client.get(changelist_url, {'section__isnull': '0'})
self.assertContains(response, '3 articles')
response = self.client.get(changelist_url, {'section__isnull': 'true'})
self.assertContains(response, '1 article')
response = self.client.get(changelist_url, {'section__isnull': '1'})
self.assertContains(response, '1 article')
def test_logout_and_password_change_URLs(self):
response = self.client.get(reverse('admin:admin_views_article_changelist'))
self.assertContains(response, '<a href="%s">' % reverse('admin:logout'))
self.assertContains(response, '<a href="%s">' % reverse('admin:password_change'))
def test_named_group_field_choices_change_list(self):
"""
Ensures the admin changelist shows correct values in the relevant column
for rows corresponding to instances of a model in which a named group
has been used in the choices option of a field.
"""
link1 = reverse('admin:admin_views_fabric_change', args=(self.fab1.pk,))
link2 = reverse('admin:admin_views_fabric_change', args=(self.fab2.pk,))
response = self.client.get(reverse('admin:admin_views_fabric_changelist'))
fail_msg = "Changelist table isn't showing the right human-readable values set by a model field 'choices' option named group."
self.assertContains(response, '<a href="%s">Horizontal</a>' % link1, msg_prefix=fail_msg, html=True)
self.assertContains(response, '<a href="%s">Vertical</a>' % link2, msg_prefix=fail_msg, html=True)
def test_named_group_field_choices_filter(self):
"""
Ensures the filter UI shows correctly when at least one named group has
been used in the choices option of a model field.
"""
response = self.client.get(reverse('admin:admin_views_fabric_changelist'))
fail_msg = "Changelist filter isn't showing options contained inside a model field 'choices' option named group."
self.assertContains(response, '<div id="changelist-filter">')
self.assertContains(response,
'<a href="?surface__exact=x">Horizontal</a>', msg_prefix=fail_msg, html=True)
self.assertContains(response,
'<a href="?surface__exact=y">Vertical</a>', msg_prefix=fail_msg, html=True)
def test_change_list_null_boolean_display(self):
Post.objects.create(public=None)
response = self.client.get(reverse('admin:admin_views_post_changelist'))
self.assertContains(response, 'icon-unknown.svg')
def test_i18n_language_non_english_default(self):
"""
Check if the JavaScript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), translation.override('en-us'):
response = self.client.get(reverse('admin:jsi18n'))
self.assertNotContains(response, 'Choisir une heure')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), translation.override('none'):
response = self.client.get(reverse('admin:jsi18n'))
self.assertContains(response, 'Choisir une heure')
def test_L10N_deactivated(self):
"""
Check that, if L10N is deactivated, the JavaScript i18n view doesn't
return localized date/time formats. Refs #14824.
"""
with self.settings(LANGUAGE_CODE='ru', USE_L10N=False), translation.override('none'):
response = self.client.get(reverse('admin:jsi18n'))
self.assertNotContains(response, '%d.%m.%Y %H:%M:%S')
self.assertContains(response, '%Y-%m-%d %H:%M:%S')
def test_disallowed_filtering(self):
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as calls:
response = self.client.get(
"%s?owner__email__startswith=fuzzy" % reverse('admin:admin_views_album_changelist')
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
# Filters are allowed if explicitly included in list_filter
response = self.client.get("%s?color__value__startswith=red" % reverse('admin:admin_views_thing_changelist'))
self.assertEqual(response.status_code, 200)
response = self.client.get("%s?color__value=red" % reverse('admin:admin_views_thing_changelist'))
self.assertEqual(response.status_code, 200)
# Filters should be allowed if they involve a local field without the
# need to whitelist them in list_filter or date_hierarchy.
response = self.client.get("%s?age__gt=30" % reverse('admin:admin_views_person_changelist'))
self.assertEqual(response.status_code, 200)
e1 = Employee.objects.create(name='Anonymous', gender=1, age=22, alive=True, code='123')
e2 = Employee.objects.create(name='Visitor', gender=2, age=19, alive=True, code='124')
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
response = self.client.get(reverse('admin:admin_views_workhour_changelist'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'employee__person_ptr__exact')
response = self.client.get("%s?employee__person_ptr__exact=%d" % (
reverse('admin:admin_views_workhour_changelist'), e1.pk)
)
self.assertEqual(response.status_code, 200)
def test_disallowed_to_field(self):
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.get(reverse('admin:admin_views_section_changelist'), {TO_FIELD_VAR: 'missing_field'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
# Specifying a field that is not referred to by any other model registered
# to this admin site should raise an exception.
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.get(reverse('admin:admin_views_section_changelist'), {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
# #23839 - Primary key should always be allowed, even if the referenced model isn't registered.
response = self.client.get(reverse('admin:admin_views_notreferenced_changelist'), {TO_FIELD_VAR: 'id'})
self.assertEqual(response.status_code, 200)
# #23915 - Specifying a field referenced by another model through an m2m should be allowed.
response = self.client.get(reverse('admin:admin_views_recipe_changelist'), {TO_FIELD_VAR: 'rname'})
self.assertEqual(response.status_code, 200)
# #23604, #23915 - Specifying a field referenced through a reverse m2m relationship should be allowed.
response = self.client.get(reverse('admin:admin_views_ingredient_changelist'), {TO_FIELD_VAR: 'iname'})
self.assertEqual(response.status_code, 200)
# #23329 - Specifying a field that is not referred to by any other model directly registered
# to this admin site but registered through inheritance should be allowed.
response = self.client.get(reverse('admin:admin_views_referencedbyparent_changelist'), {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 200)
# #23431 - Specifying a field that is only referred to by an inline of a registered
# model should be allowed.
response = self.client.get(reverse('admin:admin_views_referencedbyinline_changelist'), {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 200)
# We also want to prevent the add, change, and delete views from
# leaking a disallowed field value.
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.post(reverse('admin:admin_views_section_add'), {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
section = Section.objects.create()
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.post(reverse('admin:admin_views_section_change', args=(section.pk,)), {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.post(reverse('admin:admin_views_section_delete', args=(section.pk,)), {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
def test_allowed_filtering_15103(self):
"""
Regressions test for ticket 15103 - filtering on fields defined in a
ForeignKey 'limit_choices_to' should be allowed, otherwise raw_id_fields
can break.
"""
# Filters should be allowed if they are defined on a ForeignKey pointing to this model
response = self.client.get("%s?leader__name=Palin&leader__age=27" % reverse('admin:admin_views_inquisition_changelist'))
self.assertEqual(response.status_code, 200)
def test_popup_dismiss_related(self):
"""
Regression test for ticket 20664 - ensure the pk is properly quoted.
"""
actor = Actor.objects.create(name="Palin", age=27)
response = self.client.get("%s?%s" % (reverse('admin:admin_views_actor_changelist'), IS_POPUP_VAR))
self.assertContains(response, "opener.dismissRelatedLookupPopup(window, '%s')" % actor.pk)
def test_hide_change_password(self):
"""
Tests if the "change password" link in the admin is hidden if the User
does not have a usable password set.
(against 9bea85795705d015cdadc82c68b99196a8554f5c)
"""
user = User.objects.get(username='super')
user.set_unusable_password()
user.save()
response = self.client.get(reverse('admin:index'))
self.assertNotContains(response, reverse('admin:password_change'),
msg_prefix='The "change password" link should not be displayed if a user does not have a usable password.')
def test_change_view_with_show_delete_extra_context(self):
"""
Ensure that the 'show_delete' context variable in the admin's change
view actually controls the display of the delete button.
Refs #10057.
"""
instance = UndeletableObject.objects.create(name='foo')
response = self.client.get(reverse('admin:admin_views_undeletableobject_change', args=(instance.pk,)))
self.assertNotContains(response, 'deletelink')
def test_allows_attributeerror_to_bubble_up(self):
"""
Ensure that AttributeErrors are allowed to bubble when raised inside
a change list view.
Requires a model to be created so there's something to be displayed.
Refs #16655, #18593, and #18747.
"""
Simple.objects.create()
with self.assertRaises(AttributeError):
self.client.get(reverse('admin:admin_views_simple_changelist'))
def test_changelist_with_no_change_url(self):
"""
ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if the
URL for change_view is removed from get_urls().
Regression test for #20934
"""
UnchangeableObject.objects.create()
response = self.client.get(reverse('admin:admin_views_unchangeableobject_changelist'))
self.assertEqual(response.status_code, 200)
# Check the format of the shown object -- shouldn't contain a change link
self.assertContains(response, '<th class="field-__str__">UnchangeableObject object</th>', html=True)
def test_invalid_appindex_url(self):
"""
#21056 -- URL reversing shouldn't work for nonexistent apps.
"""
good_url = '/test_admin/admin/admin_views/'
confirm_good_url = reverse('admin:app_list',
kwargs={'app_label': 'admin_views'})
self.assertEqual(good_url, confirm_good_url)
with self.assertRaises(NoReverseMatch):
reverse('admin:app_list', kwargs={'app_label': 'this_should_fail'})
with self.assertRaises(NoReverseMatch):
reverse('admin:app_list', args=('admin_views2',))
def test_resolve_admin_views(self):
index_match = resolve('/test_admin/admin4/')
list_match = resolve('/test_admin/admin4/auth/user/')
self.assertIs(index_match.func.admin_site, customadmin.simple_site)
self.assertIsInstance(list_match.func.model_admin, customadmin.CustomPwdTemplateUserAdmin)
def test_proxy_model_content_type_is_used_for_log_entries(self):
"""
Log entries for proxy models should have the proxy model's content
type.
Regression test for #21084.
"""
color2_content_type = ContentType.objects.get_for_model(Color2, for_concrete_model=False)
# add
color2_add_url = reverse('admin:admin_views_color2_add')
self.client.post(color2_add_url, {'value': 'orange'})
color2_addition_log = LogEntry.objects.all()[0]
self.assertEqual(color2_content_type, color2_addition_log.content_type)
# change
color_id = color2_addition_log.object_id
color2_change_url = reverse('admin:admin_views_color2_change', args=(color_id,))
self.client.post(color2_change_url, {'value': 'blue'})
color2_change_log = LogEntry.objects.all()[0]
self.assertEqual(color2_content_type, color2_change_log.content_type)
# delete
color2_delete_url = reverse('admin:admin_views_color2_delete', args=(color_id,))
self.client.post(color2_delete_url)
color2_delete_log = LogEntry.objects.all()[0]
self.assertEqual(color2_content_type, color2_delete_log.content_type)
def test_adminsite_display_site_url(self):
"""
#13749 - Admin should display link to front-end site 'View site'
"""
url = reverse('admin:index')
response = self.client.get(url)
self.assertEqual(response.context['site_url'], '/my-site-url/')
self.assertContains(response, '<a href="/my-site-url/">View site</a>')
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Put this app's and the shared tests' templates dirs in DIRS to take
# precedence over the admin's templates dir.
'DIRS': [
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
os.path.join(os.path.dirname(os.path.dirname(upath(__file__))), 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}])
class AdminCustomTemplateTests(AdminViewBasicTestCase):
def test_custom_model_admin_templates(self):
# Test custom change list template with custom extra context
response = self.client.get(reverse('admin:admin_views_customarticle_changelist'))
self.assertContains(response, "var hello = 'Hello!';")
self.assertTemplateUsed(response, 'custom_admin/change_list.html')
# Test custom add form template
response = self.client.get(reverse('admin:admin_views_customarticle_add'))
self.assertTemplateUsed(response, 'custom_admin/add_form.html')
# Add an article so we can test delete, change, and history views
post = self.client.post(reverse('admin:admin_views_customarticle_add'), {
'content': '<p>great article</p>',
'date_0': '2008-03-18',
'date_1': '10:54:39'
})
self.assertRedirects(post, reverse('admin:admin_views_customarticle_changelist'))
self.assertEqual(CustomArticle.objects.all().count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get(reverse('admin:admin_views_customarticle_change', args=(article_pk,)))
self.assertTemplateUsed(response, 'custom_admin/change_form.html')
response = self.client.get(reverse('admin:admin_views_customarticle_delete', args=(article_pk,)))
self.assertTemplateUsed(response, 'custom_admin/delete_confirmation.html')
response = self.client.post(reverse('admin:admin_views_customarticle_changelist'), data={
'index': 0,
'action': ['delete_selected'],
'_selected_action': ['1'],
})
self.assertTemplateUsed(response, 'custom_admin/delete_selected_confirmation.html')
response = self.client.get(reverse('admin:admin_views_customarticle_history', args=(article_pk,)))
self.assertTemplateUsed(response, 'custom_admin/object_history.html')
def test_extended_bodyclass_template_change_form(self):
"""
Ensure that the admin/change_form.html template uses block.super in the
bodyclass block.
"""
response = self.client.get(reverse('admin:admin_views_section_add'))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_change_password(self):
"""
Ensure that the auth/user/change_password.html template uses
block.super in the bodyclass block.
"""
user = User.objects.get(username='super')
response = self.client.get(reverse('admin:auth_user_password_change', args=(user.id,)))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_index(self):
"""
Ensure that the admin/index.html template uses block.super in the
bodyclass block.
"""
response = self.client.get(reverse('admin:index'))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_change_list(self):
"""
        Ensure that the admin/change_list.html template uses block.super
in the bodyclass block.
"""
response = self.client.get(reverse('admin:admin_views_article_changelist'))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_login(self):
"""
Ensure that the admin/login.html template uses block.super in the
bodyclass block.
"""
self.client.logout()
response = self.client.get(reverse('admin:login'))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_delete_confirmation(self):
"""
Ensure that the admin/delete_confirmation.html template uses
block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
response = self.client.get(reverse('admin:auth_group_delete', args=(group.id,)))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_delete_selected_confirmation(self):
"""
Ensure that the admin/delete_selected_confirmation.html template uses
        block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
post_data = {
'action': 'delete_selected',
'selected_across': '0',
'index': '0',
'_selected_action': group.id
}
response = self.client.post(reverse('admin:auth_group_changelist'), post_data)
self.assertEqual(response.context['site_header'], 'Django administration')
self.assertContains(response, 'bodyclass_consistency_check ')
def test_filter_with_custom_template(self):
"""
Ensure that one can use a custom template to render an admin filter.
Refs #17515.
"""
response = self.client.get(reverse('admin:admin_views_color2_changelist'))
self.assertTemplateUsed(response, 'custom_filter_template.html')
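    # Custom filter templates hang off the ListFilter.template attribute; a
    # hedged sketch of the kind of filter this test assumes (the actual
    # filter class is defined in the test app):
    #
    #   class CustomTemplateFilter(admin.SimpleListFilter):
    #       title = 'color'
    #       parameter_name = 'color'
    #       template = 'custom_filter_template.html'
    #
    #       def lookups(self, request, model_admin):
    #           return [('red', 'red')]
    #
    #       def queryset(self, request, queryset):
    #           return queryset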
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewFormUrlTest(TestCase):
current_app = "admin3"
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
def setUp(self):
self.client.login(username='super', password='secret')
def test_change_form_URL_has_correct_value(self):
"""
Tests whether change_view has form_url in response.context
"""
response = self.client.get(
reverse('admin:admin_views_section_change', args=(self.s1.pk,), current_app=self.current_app)
)
self.assertIn('form_url', response.context, msg='form_url not present in response.context')
self.assertEqual(response.context['form_url'], 'pony')
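    # form_url reaches the template context because ModelAdmin.change_view()
    # accepts it as a parameter; a sketch of an override producing the
    # 'pony' value asserted above (illustrative, not the test app's exact
    # code):
    #
    #   class SectionAdmin(admin.ModelAdmin):
    #       def change_view(self, request, object_id, form_url='', extra_context=None):
    #           return super(SectionAdmin, self).change_view(
    #               request, object_id, form_url='pony', extra_context=extra_context)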
def test_initial_data_can_be_overridden(self):
"""
        The behavior for setting initial form data can be overridden in the
        ModelAdmin class. Usually, the initial value is set via the GET
        params.
"""
response = self.client.get(
reverse('admin:admin_views_restaurant_add', current_app=self.current_app),
{'name': 'test_value'}
)
# this would be the usual behaviour
self.assertNotContains(response, 'value="test_value"')
# this is the overridden behaviour
self.assertContains(response, 'value="overridden_value"')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminJavaScriptTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_js_minified_only_if_debug_is_false(self):
"""
Ensure that the minified versions of the JS files are only used when
DEBUG is False.
Refs #17521.
"""
with override_settings(DEBUG=False):
response = self.client.get(reverse('admin:admin_views_section_add'))
self.assertNotContains(response, 'vendor/jquery/jquery.js')
self.assertContains(response, 'vendor/jquery/jquery.min.js')
self.assertNotContains(response, 'prepopulate.js')
self.assertContains(response, 'prepopulate.min.js')
self.assertNotContains(response, 'actions.js')
self.assertContains(response, 'actions.min.js')
self.assertNotContains(response, 'collapse.js')
self.assertContains(response, 'collapse.min.js')
self.assertNotContains(response, 'inlines.js')
self.assertContains(response, 'inlines.min.js')
with override_settings(DEBUG=True):
response = self.client.get(reverse('admin:admin_views_section_add'))
self.assertContains(response, 'vendor/jquery/jquery.js')
self.assertNotContains(response, 'vendor/jquery/jquery.min.js')
self.assertContains(response, 'prepopulate.js')
self.assertNotContains(response, 'prepopulate.min.js')
self.assertContains(response, 'actions.js')
self.assertNotContains(response, 'actions.min.js')
self.assertContains(response, 'collapse.js')
self.assertNotContains(response, 'collapse.min.js')
self.assertContains(response, 'inlines.js')
self.assertNotContains(response, 'inlines.min.js')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class SaveAsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.per1 = Person.objects.create(name='John Mauchly', gender=1, alive=True)
def setUp(self):
self.client.login(username='super', password='secret')
def test_save_as_duplication(self):
"""Ensure save as actually creates a new person"""
post_data = {'_saveasnew': '', 'name': 'John M', 'gender': 1, 'age': 42}
self.client.post(reverse('admin:admin_views_person_change', args=(self.per1.pk,)), post_data)
self.assertEqual(len(Person.objects.filter(name='John M')), 1)
self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
def test_save_as_new_with_validation_errors(self):
"""
        When you click "Save as new" and a validation error occurs, only
        the "Save as new" button should be visible; the other save buttons
        should be hidden.
"""
response = self.client.post(reverse('admin:admin_views_person_change', args=(self.per1.pk,)), {
'_saveasnew': '',
'gender': 'invalid',
'_addanother': 'fail',
})
self.assertContains(response, 'Please correct the errors below.')
self.assertFalse(response.context['show_save_and_add_another'])
self.assertFalse(response.context['show_save_and_continue'])
self.assertTrue(response.context['show_save_as_new'])
def test_save_as_new_with_validation_errors_with_inlines(self):
parent = Parent.objects.create(name='Father')
child = Child.objects.create(parent=parent, name='Child')
response = self.client.post(reverse('admin:admin_views_parent_change', args=(parent.pk,)), {
'_saveasnew': 'Save as new',
'child_set-0-parent': parent.pk,
'child_set-0-id': child.pk,
'child_set-0-name': 'Child',
'child_set-INITIAL_FORMS': 1,
'child_set-MAX_NUM_FORMS': 1000,
'child_set-MIN_NUM_FORMS': 0,
'child_set-TOTAL_FORMS': 4,
'name': '_invalid',
})
self.assertContains(response, 'Please correct the error below.')
self.assertFalse(response.context['show_save_and_add_another'])
self.assertFalse(response.context['show_save_and_continue'])
self.assertTrue(response.context['show_save_as_new'])
def test_save_as_new_with_inlines_with_validation_errors(self):
parent = Parent.objects.create(name='Father')
child = Child.objects.create(parent=parent, name='Child')
response = self.client.post(reverse('admin:admin_views_parent_change', args=(parent.pk,)), {
'_saveasnew': 'Save as new',
'child_set-0-parent': parent.pk,
'child_set-0-id': child.pk,
'child_set-0-name': '_invalid',
'child_set-INITIAL_FORMS': 1,
'child_set-MAX_NUM_FORMS': 1000,
'child_set-MIN_NUM_FORMS': 0,
'child_set-TOTAL_FORMS': 4,
'name': 'Father',
})
self.assertContains(response, 'Please correct the error below.')
self.assertFalse(response.context['show_save_and_add_another'])
self.assertFalse(response.context['show_save_and_continue'])
self.assertTrue(response.context['show_save_as_new'])
@override_settings(ROOT_URLCONF="admin_views.urls")
class CustomModelAdminTest(AdminViewBasicTestCase):
def test_custom_admin_site_login_form(self):
self.client.logout()
response = self.client.get(reverse('admin2:index'), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post(reverse('admin2:login'), {
REDIRECT_FIELD_NAME: reverse('admin2:index'),
'username': 'customform',
'password': 'secret',
}, follow=True)
self.assertIsInstance(login, TemplateResponse)
self.assertEqual(login.status_code, 200)
self.assertContains(login, 'custom form error')
self.assertContains(login, 'path/to/media.css')
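    # The 'admin2' namespace points at a custom AdminSite that swaps in its
    # own form and templates via the documented AdminSite attributes; a
    # hedged sketch, not the test app's exact definition:
    #
    #   class CustomAdminSite(admin.AdminSite):
    #       login_form = CustomAdminAuthenticationForm
    #       login_template = 'custom_admin/login.html'
    #       logout_template = 'custom_admin/logout.html'
    #       index_template = ['custom_admin/index.html']
    #       app_index_template = ['custom_admin/app_index.html']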
def test_custom_admin_site_login_template(self):
self.client.logout()
response = self.client.get(reverse('admin2:index'), follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/login.html')
self.assertContains(response, 'Hello from a custom login template')
def test_custom_admin_site_logout_template(self):
response = self.client.get(reverse('admin2:logout'))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/logout.html')
self.assertContains(response, 'Hello from a custom logout template')
def test_custom_admin_site_index_view_and_template(self):
try:
response = self.client.get(reverse('admin2:index'))
except TypeError:
self.fail('AdminSite.index_template should accept a list of template paths')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/index.html')
self.assertContains(response, 'Hello from a custom index template *bar*')
def test_custom_admin_site_app_index_view_and_template(self):
response = self.client.get(reverse('admin2:app_list', args=('admin_views',)))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/app_index.html')
self.assertContains(response, 'Hello from a custom app_index template')
def test_custom_admin_site_password_change_template(self):
response = self.client.get(reverse('admin2:password_change'))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/password_change_form.html')
self.assertContains(response, 'Hello from a custom password change form template')
def test_custom_admin_site_password_change_with_extra_context(self):
response = self.client.get(reverse('admin2:password_change'))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/password_change_form.html')
self.assertContains(response, 'eggs')
def test_custom_admin_site_password_change_done_template(self):
response = self.client.get(reverse('admin2:password_change_done'))
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/password_change_done.html')
self.assertContains(response, 'Hello from a custom password change done template')
def test_custom_admin_site_view(self):
self.client.login(username='super', password='secret')
response = self.client.get(reverse('admin2:my_view'))
self.assertEqual(response.content, b"Django is a magical pony!")
def test_pwd_change_custom_template(self):
self.client.login(username='super', password='secret')
su = User.objects.get(username='super')
try:
response = self.client.get(
reverse('admin4:auth_user_password_change', args=(su.pk,))
)
except TypeError:
self.fail('ModelAdmin.change_user_password_template should accept a list of template paths')
self.assertEqual(response.status_code, 200)
def get_perm(Model, perm):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model)
return Permission.objects.get(content_type=ct, codename=perm)
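# Typical usage, mirroring the permission setup in the test cases below:
#
#   opts = Article._meta
#   user.user_permissions.add(get_perm(Article, get_permission_codename('add', opts)))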
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
@classmethod
def setUpTestData(cls):
super(AdminViewPermissionsTest, cls).setUpTestData()
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
        # Set up permissions for the users who can add, change, and delete.
opts = Article._meta
# User who can add Articles
cls.u2.user_permissions.add(get_perm(Article, get_permission_codename('add', opts)))
# User who can change Articles
cls.u3.user_permissions.add(get_perm(Article, get_permission_codename('change', opts)))
cls.u6.user_permissions.add(get_perm(Article, get_permission_codename('change', opts)))
# User who can delete Articles
cls.u4.user_permissions.add(get_perm(Article, get_permission_codename('delete', opts)))
cls.u4.user_permissions.add(get_perm(Section, get_permission_codename('delete', Section._meta)))
# login POST dicts
cls.index_url = reverse('admin:index')
cls.super_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'super',
'password': 'secret',
}
cls.super_email_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'super@example.com',
'password': 'secret',
}
cls.super_email_bad_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'super@example.com',
'password': 'notsecret',
}
cls.adduser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'adduser',
'password': 'secret',
}
cls.changeuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'changeuser',
'password': 'secret',
}
cls.deleteuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'deleteuser',
'password': 'secret',
}
cls.nostaff_login = {
REDIRECT_FIELD_NAME: reverse('has_permission_admin:index'),
'username': 'nostaff',
'password': 'secret',
}
cls.joepublic_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'username': 'joepublic',
'password': 'secret',
}
cls.no_username_login = {
REDIRECT_FIELD_NAME: cls.index_url,
'password': 'secret',
}
def test_login(self):
"""
Make sure only staff members can log in.
Successful posts to the login page will redirect to the original url.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = '%s?next=%s' % (reverse('admin:login'), reverse('admin:index'))
# Super User
response = self.client.get(self.index_url)
self.assertRedirects(response, login_url)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.get(reverse('admin:logout'))
# Test if user enters email address
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# only correct passwords get a username hint
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username='jondoe', password='secret', email='super@example.com')
new_user.save()
        # Ensure that a user with multiple email addresses on file doesn't get a 500
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# Add User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.get(reverse('admin:logout'))
# Change User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.get(reverse('admin:logout'))
# Delete User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.get(reverse('admin:logout'))
        # Regular User should not be able to log in.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
form = login.context[0].get('form')
self.assertEqual(form.errors['username'][0], 'This field is required.')
def test_login_redirect_for_direct_get(self):
"""
Login redirect should be to the admin index page when going directly to
/admin/login/.
"""
response = self.client.get(reverse('admin:login'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse('admin:index'))
def test_login_has_permission(self):
        # Regular User should not be able to log in.
response = self.client.get(reverse('has_permission_admin:index'))
self.assertEqual(response.status_code, 302)
login = self.client.post(reverse('has_permission_admin:login'), self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, 'permission denied')
# User with permissions should be able to login.
response = self.client.get(reverse('has_permission_admin:index'))
self.assertEqual(response.status_code, 302)
login = self.client.post(reverse('has_permission_admin:login'), self.nostaff_login)
self.assertRedirects(login, reverse('has_permission_admin:index'))
self.assertFalse(login.context)
self.client.get(reverse('has_permission_admin:logout'))
# Staff should be able to login.
response = self.client.get(reverse('has_permission_admin:index'))
self.assertEqual(response.status_code, 302)
login = self.client.post(reverse('has_permission_admin:login'), {
REDIRECT_FIELD_NAME: reverse('has_permission_admin:index'),
'username': 'deleteuser',
'password': 'secret',
})
self.assertRedirects(login, reverse('has_permission_admin:index'))
self.assertFalse(login.context)
self.client.get(reverse('has_permission_admin:logout'))
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
query_string = 'the-answer=42'
redirect_url = '%s?%s' % (self.index_url, query_string)
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
login = self.client.post(
'%s?%s' % (reverse('admin:login'), urlencode(new_next)),
post_data)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = '%s?next=%s' % (reverse('admin:login'), reverse('admin:index'))
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.get(reverse('admin:logout'))
def test_login_page_notice_for_non_staff_users(self):
"""
A logged-in non-staff user trying to access the admin index should be
presented with the login page and a hint indicating that the current
user doesn't have access to it.
"""
hint_template = 'You are authenticated as {}'
# Anonymous user should not be shown the hint
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'login-form')
self.assertNotContains(response, hint_template.format(''), status_code=200)
# Non-staff user should be shown the hint
self.client.login(**self.nostaff_login)
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'login-form')
self.assertContains(response, hint_template.format(self.u6.username), status_code=200)
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
login_url = '%s?next=%s' % (reverse('admin:login'), reverse('admin:index'))
add_dict = {'title': 'Døm ikke',
'content': '<p>great article</p>',
'date_0': '2008-03-18', 'date_1': '10:54:39',
'section': self.s1.pk}
# Change User should not have access to add articles
self.client.get(self.index_url)
self.client.post(login_url, self.changeuser_login)
        # make sure the view removes the test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse('admin:admin_views_article_add'))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse('admin:admin_views_article_add'), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.get(reverse('admin:logout'))
        # The add user may log in and POST to the add view, and is then redirected to the admin root
self.client.get(self.index_url)
self.client.post(login_url, self.adduser_login)
addpage = self.client.get(reverse('admin:admin_views_article_add'))
change_list_link = '› <a href="%s">Articles</a>' % reverse('admin:admin_views_article_changelist')
self.assertNotContains(addpage, change_list_link,
msg_prefix='User restricted to add permission is given link to change list view in breadcrumbs.')
post = self.client.post(reverse('admin:admin_views_article_add'), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a created object')
self.client.get(reverse('admin:logout'))
# Check that the addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.u2.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.change_message, "Added.")
# Super can add too, but is redirected to the change list view
self.client.get(self.index_url)
self.client.post(login_url, self.super_login)
addpage = self.client.get(reverse('admin:admin_views_article_add'))
self.assertContains(addpage, change_list_link,
msg_prefix='Unrestricted user is not given link to change list view in breadcrumbs.')
post = self.client.post(reverse('admin:admin_views_article_add'), add_dict)
self.assertRedirects(post, reverse('admin:admin_views_article_changelist'))
self.assertEqual(Article.objects.count(), 5)
self.client.get(reverse('admin:logout'))
        # 8509 - if a normal user is already logged in, it is possible
        # to switch to the superuser without error
self.client.login(username='joepublic', password='secret')
        # Check and make sure that if the user session expires, data still persists
self.client.get(self.index_url)
self.client.post(login_url, self.super_login)
        # make sure the view removes the test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
def test_change_view(self):
"""Change view should restrict access and allow users to edit items."""
login_url = '%s?next=%s' % (reverse('admin:login'), reverse('admin:index'))
change_dict = {'title': 'Ikke fordømt',
'content': '<p>edited article</p>',
'date_0': '2008-03-18', 'date_1': '10:54:39',
'section': self.s1.pk}
article_change_url = reverse('admin:admin_views_article_change', args=(self.a1.pk,))
article_changelist_url = reverse('admin:admin_views_article_changelist')
        # The add user should not be able to view the list of articles or change any of them
self.client.get(self.index_url)
self.client.post(login_url, self.adduser_login)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(article_change_url, change_dict)
self.assertEqual(post.status_code, 403)
self.client.get(reverse('admin:logout'))
# change user can view all items and edit them
self.client.get(self.index_url)
self.client.post(login_url, self.changeuser_login)
response = self.client.get(article_changelist_url)
self.assertEqual(response.status_code, 200)
response = self.client.get(article_change_url)
self.assertEqual(response.status_code, 200)
post = self.client.post(article_change_url, change_dict)
self.assertRedirects(post, article_changelist_url)
self.assertEqual(Article.objects.get(pk=self.a1.pk).content, '<p>edited article</p>')
# one error in form should produce singular error message, multiple errors plural
change_dict['title'] = ''
post = self.client.post(article_change_url, change_dict)
self.assertContains(post, 'Please correct the error below.',
msg_prefix='Singular error message not found in response to post with one error')
change_dict['content'] = ''
post = self.client.post(article_change_url, change_dict)
self.assertContains(post, 'Please correct the errors below.',
msg_prefix='Plural error message not found in response to post with multiple errors')
self.client.get(reverse('admin:logout'))
# Test redirection when using row-level change permissions. Refs #11513.
r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
change_url_1 = reverse('admin:admin_views_rowlevelchangepermissionmodel_change', args=(r1.pk,))
change_url_2 = reverse('admin:admin_views_rowlevelchangepermissionmodel_change', args=(r2.pk,))
for login_dict in [self.super_login, self.changeuser_login, self.adduser_login, self.deleteuser_login]:
self.client.post(login_url, login_dict)
response = self.client.get(change_url_1)
self.assertEqual(response.status_code, 403)
response = self.client.post(change_url_1, {'name': 'changed'})
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=1).name, 'odd id')
self.assertEqual(response.status_code, 403)
response = self.client.get(change_url_2)
self.assertEqual(response.status_code, 200)
response = self.client.post(change_url_2, {'name': 'changed'})
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=2).name, 'changed')
self.assertRedirects(response, self.index_url)
self.client.get(reverse('admin:logout'))
for login_dict in [self.joepublic_login, self.no_username_login]:
self.client.post(login_url, login_dict)
response = self.client.get(change_url_1, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.post(change_url_1, {'name': 'changed'}, follow=True)
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=1).name, 'odd id')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.get(change_url_2, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.post(change_url_2, {'name': 'changed again'}, follow=True)
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=2).name, 'changed')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
self.client.get(reverse('admin:logout'))
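    # The odd/even behavior above comes from a ModelAdmin implementing
    # row-level permissions; roughly (the real admin lives in the test
    # app's admin.py):
    #
    #   class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    #       def has_change_permission(self, request, obj=None):
    #           # Only allow changing objects with an even id.
    #           return request.user.is_staff and obj is not None and obj.id % 2 == 0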
def test_delete_view(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {'post': 'yes'}
delete_url = reverse('admin:admin_views_article_delete', args=(self.a1.pk,))
# add user should not be able to delete articles
self.client.login(**self.adduser_login)
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 403)
post = self.client.post(delete_url, delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.logout()
# Delete user can delete
self.client.login(**self.deleteuser_login)
response = self.client.get(reverse('admin:admin_views_section_delete', args=(self.s1.pk,)))
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 3</li>")
# test response contains link to related Article
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
response = self.client.get(delete_url)
self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 1</li>")
self.assertEqual(response.status_code, 200)
post = self.client.post(delete_url, delete_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a deleted object')
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, str(self.a1.pk))
def test_history_view(self):
"""History view should restrict access."""
login_url = '%s?next=%s' % (reverse('admin:login'), reverse('admin:index'))
        # The add user should not be able to view the history of articles
self.client.get(self.index_url)
self.client.post(login_url, self.adduser_login)
response = self.client.get(reverse('admin:admin_views_article_history', args=(self.a1.pk,)))
self.assertEqual(response.status_code, 403)
self.client.get(reverse('admin:logout'))
        # The change user can view the history of articles
self.client.get(self.index_url)
self.client.post(login_url, self.changeuser_login)
response = self.client.get(reverse('admin:admin_views_article_history', args=(self.a1.pk,)))
self.assertEqual(response.status_code, 200)
# Test redirection when using row-level change permissions. Refs #11513.
rl1 = RowLevelChangePermissionModel.objects.create(name="odd id")
rl2 = RowLevelChangePermissionModel.objects.create(name="even id")
for login_dict in [self.super_login, self.changeuser_login, self.adduser_login, self.deleteuser_login]:
self.client.post(login_url, login_dict)
response = self.client.get(reverse('admin:admin_views_rowlevelchangepermissionmodel_history', args=(rl1.pk,)))
self.assertEqual(response.status_code, 403)
response = self.client.get(reverse('admin:admin_views_rowlevelchangepermissionmodel_history', args=(rl2.pk,)))
self.assertEqual(response.status_code, 200)
self.client.get(reverse('admin:logout'))
for login_dict in [self.joepublic_login, self.no_username_login]:
self.client.post(login_url, login_dict)
response = self.client.get(
reverse('admin:admin_views_rowlevelchangepermissionmodel_history', args=(rl1.pk,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.get(
reverse('admin:admin_views_rowlevelchangepermissionmodel_history', args=(rl2.pk,)), follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
self.client.get(reverse('admin:logout'))
def test_history_view_bad_url(self):
self.client.post(reverse('admin:login'), self.changeuser_login)
response = self.client.get(reverse('admin:admin_views_article_history', args=('foo',)))
self.assertEqual(response.status_code, 404)
def test_conditionally_show_add_section_link(self):
"""
The foreign key widget should only show the "add related" button if the
user has permission to add that related item.
"""
self.client.login(**self.adduser_login)
# The user can't add sections yet, so they shouldn't see the "add section" link.
url = reverse('admin:admin_views_article_add')
add_link_text = 'add_id_section'
response = self.client.get(url)
self.assertNotContains(response, add_link_text)
# Allow the user to add sections too. Now they can see the "add section" link.
user = User.objects.get(username='adduser')
perm = get_perm(Section, get_permission_codename('add', Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertContains(response, add_link_text)
def test_conditionally_show_change_section_link(self):
"""
The foreign key widget should only show the "change related" button if
the user has permission to change that related item.
"""
def get_change_related(response):
return response.context['adminform'].form.fields['section'].widget.can_change_related
self.client.login(**self.adduser_login)
# The user can't change sections yet, so they shouldn't see the "change section" link.
url = reverse('admin:admin_views_article_add')
change_link_text = 'change_id_section'
response = self.client.get(url)
self.assertFalse(get_change_related(response))
self.assertNotContains(response, change_link_text)
# Allow the user to change sections too. Now they can see the "change section" link.
user = User.objects.get(username='adduser')
perm = get_perm(Section, get_permission_codename('change', Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_change_related(response))
self.assertContains(response, change_link_text)
def test_conditionally_show_delete_section_link(self):
"""
The foreign key widget should only show the "delete related" button if
the user has permission to delete that related item.
"""
def get_delete_related(response):
return response.context['adminform'].form.fields['sub_section'].widget.can_delete_related
self.client.login(**self.adduser_login)
# The user can't delete sections yet, so they shouldn't see the "delete section" link.
url = reverse('admin:admin_views_article_add')
delete_link_text = 'delete_id_sub_section'
response = self.client.get(url)
self.assertFalse(get_delete_related(response))
self.assertNotContains(response, delete_link_text)
# Allow the user to delete sections too. Now they can see the "delete section" link.
user = User.objects.get(username='adduser')
perm = get_perm(Section, get_permission_codename('delete', Section._meta))
user.user_permissions.add(perm)
response = self.client.get(url)
self.assertTrue(get_delete_related(response))
self.assertContains(response, delete_link_text)
def test_disabled_permissions_when_logged_in(self):
self.client.login(username='super', password='secret')
superuser = User.objects.get(username='super')
superuser.is_active = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, 'Log out')
response = self.client.get(reverse('secure_view'), follow=True)
self.assertContains(response, 'id="login-form"')
def test_disabled_staff_permissions_when_logged_in(self):
self.client.login(username='super', password='secret')
superuser = User.objects.get(username='super')
superuser.is_staff = False
superuser.save()
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, 'Log out')
response = self.client.get(reverse('secure_view'), follow=True)
self.assertContains(response, 'id="login-form"')
def test_app_index_fail_early(self):
"""
If a user has no module perms, avoid iterating over all the modeladmins
in the registry.
"""
opts = Article._meta
change_user = User.objects.get(username='changeuser')
permission = get_perm(Article, get_permission_codename('change', opts))
self.client.login(**self.changeuser_login)
        # After removing the change permission, the user has no module permissions left
change_user.user_permissions.remove(permission)
response = self.client.get(reverse('admin:app_list', args=('admin_views',)))
self.assertEqual(response.status_code, 403)
# the user now has module permissions
change_user.user_permissions.add(permission)
response = self.client.get(reverse('admin:app_list', args=('admin_views',)))
self.assertEqual(response.status_code, 200)
def test_shortcut_view_only_available_to_staff(self):
"""
Only admin users should be able to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk='foo')
shortcut_url = reverse('admin:view_on_site', args=(model_ctype.pk, obj.pk))
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, 'admin/login.html')
# Logged in? Redirect.
self.client.login(username='super', password='secret')
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
        # The domain may depend on whether the contrib.sites tests have also run
six.assertRegex(self, response.url, 'http://(testserver|example.com)/dummy/foo/')
def test_has_module_permission(self):
"""
Ensure that has_module_permission() returns True for all users who
have any permission for that module (add, change, or delete), so that
the module is displayed on the admin index page.
"""
self.client.login(**self.super_login)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.login(**self.adduser_login)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.login(**self.changeuser_login)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.logout()
self.client.login(**self.deleteuser_login)
response = self.client.get(self.index_url)
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
def test_overriding_has_module_permission(self):
"""
Ensure that overriding has_module_permission() has the desired effect.
In this case, it always returns False, so the module should not be
displayed on the admin index page for any users.
"""
index_url = reverse('admin7:index')
self.client.login(**self.super_login)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.logout()
self.client.login(**self.adduser_login)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.logout()
self.client.login(**self.changeuser_login)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.logout()
self.client.login(**self.deleteuser_login)
response = self.client.get(index_url)
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
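    # The 'admin7' site registers its models with a ModelAdmin overriding
    # has_module_permission(); the pattern under test is simply
    # (illustrative):
    #
    #   class ArticleAdmin7(admin.ModelAdmin):
    #       def has_module_permission(self, request):
    #           return False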
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
opts = Report._meta
# User who can change Reports
cls.u3.user_permissions.add(get_perm(Report, get_permission_codename('change', opts)))
# login POST dict
cls.changeuser_login = {
REDIRECT_FIELD_NAME: reverse('admin:index'),
'username': 'changeuser',
'password': 'secret',
}
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.get(reverse('admin:index'))
r = self.client.post(reverse('admin:login'), self.changeuser_login)
r = self.client.get(reverse('admin:index'))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.get(reverse('admin:logout'))
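    # The regression scenario is a ModelAdmin whose get_urls() drops the
    # default add/change/delete/history patterns, so reversing a standard
    # view raises NoReverseMatch; a hedged sketch (names illustrative):
    #
    #   class ReportAdmin(admin.ModelAdmin):
    #       def get_urls(self):
    #           # Return only custom patterns, omitting the standard ones.
    #           return [url(r'^extra/$', self.admin_site.admin_view(self.extra_view))]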
@skipUnlessDBFeature('can_defer_constraint_checks')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewDeletedObjectsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.v1 = Villain.objects.create(name='Adam')
cls.v2 = Villain.objects.create(name='Sue')
cls.sv1 = SuperVillain.objects.create(name='Bob')
cls.pl1 = Plot.objects.create(name='World Domination', team_leader=cls.v1, contact=cls.v2)
cls.pl2 = Plot.objects.create(name='World Peace', team_leader=cls.v2, contact=cls.v2)
cls.pl3 = Plot.objects.create(name='Corn Conspiracy', team_leader=cls.v1, contact=cls.v1)
cls.pd1 = PlotDetails.objects.create(details='almost finished', plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(location='underground bunker', villain=cls.v1)
cls.sh2 = SecretHideout.objects.create(location='floating castle', villain=cls.sv1)
cls.ssh1 = SuperSecretHideout.objects.create(location='super floating castle!', supervillain=cls.sv1)
cls.cy1 = CyclicOne.objects.create(name='I am recursive', two_id=1)
cls.cy2 = CyclicTwo.objects.create(name='I am recursive too', one_id=1)
def setUp(self):
self.client.login(username='super', password='secret')
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(force_bytes(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*<li>Plot details: <a href="%s">almost finished</a>' % (
reverse('admin:admin_views_plot_change', args=(self.pl1.pk,)),
reverse('admin:admin_views_plotdetails_change', args=(self.pd1.pk,)))
))
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v1.pk,)))
six.assertRegex(self, response.content, pattern)
def test_cyclic(self):
"""
        Cyclic relationships should still cause each object to be
        listed only once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse('admin:admin_views_cyclicone_change', args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse('admin:admin_views_cyclictwo_change', args=(self.cy2.pk,)),
)
response = self.client.get(reverse('admin:admin_views_cyclicone_delete', args=(self.cy1.pk,)))
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username='deleteuser')
delete_user.user_permissions.add(get_perm(Plot,
get_permission_codename('delete', Plot._meta)))
self.assertTrue(self.client.login(username='deleteuser',
password='secret'))
response = self.client.get(reverse('admin:admin_views_plot_delete', args=(self.pl1.pk,)))
self.assertContains(response, "your account doesn't have permission to delete the following types of objects")
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(reverse('admin:admin_views_question_delete', args=(q.pk,)))
self.assertContains(response, "would require deleting the following protected related objects")
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>' % reverse('admin:admin_views_answer_change', args=(a1.pk,))
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>' % reverse('admin:admin_views_answer_change', args=(a2.pk,))
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v1.pk,)))
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
        both of them should be followed when looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
'admin:admin_views_plot_change', args=(self.pl1.pk,)
)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v1.pk,)))
self.assertContains(response, should_contain)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v2.pk,)))
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
'admin:admin_views_plot_change', args=(self.pl2.pk,)
)
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.v2.pk,)))
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>' % reverse('admin:admin_views_villain_change', args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>' % reverse('admin:admin_views_supervillain_change', args=(self.sv1.pk,)),
'<li>Secret hideout: floating castle',
'<li>Super secret hideout: super floating castle!',
]
response = self.client.get(reverse('admin:admin_views_villain_delete', args=(self.sv1.pk,)))
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(reverse('admin:admin_views_supervillain_delete', args=(self.sv1.pk,)))
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
        the objects holding those references should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name='hott')
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
'admin:admin_views_funkytag_change', args=(tag.id,))
response = self.client.get(reverse('admin:admin_views_plot_delete', args=(plot.pk,)))
self.assertContains(response, should_contain)
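    # FunkyTag points at arbitrary models through the contenttypes
    # framework; roughly (the real model is in this test app's models.py):
    #
    #   class FunkyTag(models.Model):
    #       name = models.CharField(max_length=25)
    #       content_type = models.ForeignKey(ContentType)
    #       object_id = models.PositiveIntegerField()
    #       content_object = GenericForeignKey('content_type', 'object_id')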
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class TestGenericRelations(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.v1 = Villain.objects.create(name='Adam')
cls.pl3 = Plot.objects.create(name='Corn Conspiracy', team_leader=cls.v1, contact=cls.v1)
def setUp(self):
self.client.login(username='super', password='secret')
def test_generic_content_object_in_list_display(self):
FunkyTag.objects.create(content_object=self.pl3, name='hott')
response = self.client.get(reverse('admin:admin_views_funkytag_changelist'))
self.assertContains(response, "%s</td>" % self.pl3)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewStringPrimaryKeyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.pk = """abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
cls.m1 = ModelWithStringPrimaryKey.objects.create(string_pk=cls.pk)
content_type_pk = ContentType.objects.get_for_model(ModelWithStringPrimaryKey).pk
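        # log_action's positional args are: user_id, content_type_id,
        # object_id, object_repr, action_flag (2 == CHANGE).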
LogEntry.objects.log_action(100, content_type_pk, cls.pk, cls.pk, 2, change_message='Changed something')
def setUp(self):
self.client.login(username='super', password='secret')
def test_get_history_view(self):
"""
Retrieving the history for an object using urlencoded form of primary
key should work.
Refs #12349, #18550.
"""
response = self.client.get(reverse('admin:admin_views_modelwithstringprimarykey_history', args=(self.pk,)))
self.assertContains(response, escape(self.pk))
self.assertContains(response, 'Changed something')
self.assertEqual(response.status_code, 200)
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get(reverse('admin:admin_views_modelwithstringprimarykey_change', args=(self.pk,)))
self.assertContains(response, escape(self.pk))
self.assertEqual(response.status_code, 200)
def test_changelist_to_changeform_link(self):
"Link to the changeform of the object in changelist should use reverse() and be quoted -- #18072"
response = self.client.get(reverse('admin:admin_views_modelwithstringprimarykey_changelist'))
# this URL now comes through reverse(), thus url quoting and iri_to_uri encoding
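        # reverse() would percent-encode the pk itself, so the URL is built with
        # a '__fk__' placeholder and the already-quoted pk is substituted in.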
pk_final_url = escape(iri_to_uri(quote(self.pk)))
change_url = reverse(
'admin:admin_views_modelwithstringprimarykey_change', args=('__fk__',)
).replace('__fk__', pk_final_url)
should_contain = '<th class="field-__str__"><a href="%s">%s</a></th>' % (change_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"The link from the recent actions list referring to the changeform of the object should be quoted"
response = self.client.get(reverse('admin:index'))
link = reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(self.pk),))
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
def test_recentactions_without_content_type(self):
"If a LogEntry is missing content_type it will not display it in span tag under the hyperlink."
response = self.client.get(reverse('admin:index'))
link = reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(self.pk),))
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
should_contain = "Model with string primary key" # capitalized in Recent Actions
self.assertContains(response, should_contain)
logentry = LogEntry.objects.get(content_type__model__iexact='modelwithstringprimarykey')
# http://code.djangoproject.com/ticket/10275
# if the log entry doesn't have a content type it should still be
# possible to view the Recent Actions part
logentry.content_type = None
logentry.save()
counted_presence_before = response.content.count(force_bytes(should_contain))
response = self.client.get(reverse('admin:index'))
counted_presence_after = response.content.count(force_bytes(should_contain))
self.assertEqual(counted_presence_before - 1,
counted_presence_after)
def test_logentry_get_admin_url(self):
"LogEntry.get_admin_url returns a URL to edit the entry's object or None for non-existent (possibly deleted) models"
log_entry_model = "modelwithstringprimarykey" # capitalized in Recent Actions
logentry = LogEntry.objects.get(content_type__model__iexact=log_entry_model)
desired_admin_url = reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(self.pk),))
self.assertEqual(logentry.get_admin_url(), desired_admin_url)
self.assertIn(iri_to_uri(quote(self.pk)), logentry.get_admin_url())
logentry.content_type.model = "non-existent"
self.assertEqual(logentry.get_admin_url(), None)
def test_logentry_get_edited_object(self):
"LogEntry.get_edited_object returns the edited object of a given LogEntry object"
logentry = LogEntry.objects.get(content_type__model__iexact="modelwithstringprimarykey")
edited_obj = logentry.get_edited_object()
self.assertEqual(logentry.object_id, str(edited_obj.pk))
def test_deleteconfirmation_link(self):
"The link from the delete confirmation page referring back to the changeform of the object should be quoted"
response = self.client.get(reverse('admin:admin_views_modelwithstringprimarykey_delete', args=(quote(self.pk),)))
# this URL now comes through reverse(), thus url quoting and iri_to_uri encoding
change_url = reverse(
'admin:admin_views_modelwithstringprimarykey_change', args=('__fk__',)
).replace('__fk__', escape(iri_to_uri(quote(self.pk))))
should_contain = '<a href="%s">%s</a>' % (change_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add or is `add` should be visible"
add_model = ModelWithStringPrimaryKey.objects.create(pk="i have something to add")
add_model.save()
response = self.client.get(
reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(add_model.pk),))
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
add_model2 = ModelWithStringPrimaryKey.objects.create(pk="add")
add_url = reverse('admin:admin_views_modelwithstringprimarykey_add')
change_url = reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(add_model2.pk),))
self.assertNotEqual(add_url, change_url)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(pk="delete")
delete_model.save()
response = self.client.get(
reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(delete_model.pk),))
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(pk="history")
history_model.save()
response = self.client.get(
reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(history_model.pk),))
)
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_shortcut_view_with_escaping(self):
"'View on site should' work properly with char fields"
model = ModelWithStringPrimaryKey(pk='abc_123')
model.save()
response = self.client.get(
reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(model.pk),))
)
should_contain = '/%s/" class="viewsitelink">' % model.pk
self.assertContains(response, should_contain)
def test_change_view_history_link(self):
"""Object history button link should work and contain the pk value quoted."""
url = reverse('admin:%s_modelwithstringprimarykey_change' %
ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected_link = reverse('admin:%s_modelwithstringprimarykey_history' %
ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),))
self.assertContains(response, '<a href="%s" class="historylink"' % escape(expected_link))
def test_redirect_on_add_view_continue_button(self):
"""As soon as an object is added using "Save and continue editing"
button, the user should be redirected to the object's change_view.
In case primary key is a string containing some special characters
like slash or underscore, these characters must be escaped (see #22266)
"""
response = self.client.post(
reverse('admin:admin_views_modelwithstringprimarykey_add'),
{
'string_pk': '123/history',
"_continue": "1", # Save and continue editing
}
)
self.assertEqual(response.status_code, 302) # temporary redirect
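        # admin's quote() encodes each unsafe character as '_' plus its hex
        # ordinal, so the '/' in '123/history' becomes '_2F' in the URL below.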
self.assertIn('/123_2Fhistory/', response['location']) # PK is quoted
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class SecureViewTests(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_secure_view_shows_login_if_not_logged_in(self):
"""
Ensure that we see the admin login form.
"""
secure_url = reverse('secure_view')
response = self.client.get(secure_url)
self.assertRedirects(response, '%s?next=%s' % (reverse('admin:login'), secure_url))
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
def test_staff_member_required_decorator_works_with_argument(self):
"""
Ensure that staff_member_required decorator works with an argument
(redirect_field_name).
"""
secure_url = '/test_admin/admin/secure-view2/'
response = self.client.get(secure_url)
self.assertRedirects(response, '%s?myfield=%s' % (reverse('admin:login'), secure_url))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewUnicodeTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.b1 = Book.objects.create(name='Lærdommer')
cls.p1 = Promo.objects.create(name='<Promo for Lærdommer>', book=cls.b1)
cls.chap1 = Chapter.objects.create(
            title='Norske bostaver æøå skaper problemer', content='<p>Svært frustrerende med UnicodeDecodeError</p>',
book=cls.b1
)
cls.chap2 = Chapter.objects.create(
title='Kjærlighet', content='<p>La kjærligheten til de lidende seire.</p>', book=cls.b1)
cls.chap3 = Chapter.objects.create(title='Kjærlighet', content='<p>Noe innhold</p>', book=cls.b1)
cls.chap4 = ChapterXtra1.objects.create(chap=cls.chap1, xtra='<Xtra(1) Norske bostaver æøå skaper problemer>')
cls.chap5 = ChapterXtra1.objects.create(chap=cls.chap2, xtra='<Xtra(1) Kjærlighet>')
cls.chap6 = ChapterXtra1.objects.create(chap=cls.chap3, xtra='<Xtra(1) Kjærlighet>')
cls.chap7 = ChapterXtra2.objects.create(chap=cls.chap1, xtra='<Xtra(2) Norske bostaver æøå skaper problemer>')
cls.chap8 = ChapterXtra2.objects.create(chap=cls.chap2, xtra='<Xtra(2) Kjærlighet>')
cls.chap9 = ChapterXtra2.objects.create(chap=cls.chap3, xtra='<Xtra(2) Kjærlighet>')
def setUp(self):
self.client.login(username='super', password='secret')
def test_unicode_edit(self):
"""
A test to ensure that POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
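            # formset management form: TOTAL_FORMS counts every submitted form,
            # INITIAL_FORMS counts forms bound to existing objects.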
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": self.chap1.pk,
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": "<p>Svært frustrerende med UnicodeDecodeError</p>",
"chapter_set-1-id": self.chap2.id,
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": "<p>La kjærligheten til de lidende seire.</p>",
"chapter_set-2-id": self.chap3.id,
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post(reverse('admin:admin_views_book_change', args=(self.b1.pk,)), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_unicode_delete(self):
"""
Ensure that the delete_view handles non-ASCII characters
"""
delete_dict = {'post': 'yes'}
delete_url = reverse('admin:admin_views_book_delete', args=(self.b1.pk,))
response = self.client.get(delete_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(delete_url, delete_dict)
self.assertRedirects(response, reverse('admin:admin_views_book_changelist'))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewListEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.per1 = Person.objects.create(name='John Mauchly', gender=1, alive=True)
cls.per2 = Person.objects.create(name='Grace Hopper', gender=1, alive=False)
cls.per3 = Person.objects.create(name='Guido van Rossum', gender=1, alive=True)
def setUp(self):
self.client.login(username='super', password='secret')
def test_inheritance(self):
Podcast.objects.create(name="This Week in Django",
release_date=datetime.date.today())
response = self.client.get(reverse('admin:admin_views_podcast_changelist'))
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get(reverse('admin:admin_views_vodcast_changelist'))
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso='en', name='English', english_name='English')
response = self.client.get(reverse('admin:admin_views_language_changelist'))
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get(reverse('admin:admin_views_person_changelist'))
        # 2 inputs per object (the field and the hidden id field) = 6
# 4 management hidden fields = 4
# 4 action inputs (3 regular checkboxes, 1 checkbox to select all)
# main form submit button = 1
# search field and search submit button = 2
# CSRF field = 1
# field to track 'select all' across paginated views = 1
# 6 + 4 + 4 + 1 + 2 + 1 + 1 = 19 inputs
self.assertContains(response, "<input", count=19)
        # 1 select per object (3) + 1 select for the action dropdown = 4
        self.assertContains(response, "<select", count=4)
def test_post_messages(self):
# Ticket 12707: Saving inline editable should not show admin
# action warnings
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "%s" % self.per1.pk,
"form-1-gender": "2",
"form-1-id": "%s" % self.per2.pk,
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "%s" % self.per3.pk,
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_person_changelist'),
data, follow=True)
self.assertEqual(len(response.context['messages']), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "%s" % self.per1.pk,
"form-1-gender": "2",
"form-1-id": "%s" % self.per2.pk,
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "%s" % self.per3.pk,
"_save": "Save",
}
self.client.post(reverse('admin:admin_views_person_changelist'), data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "%s" % self.per1.pk,
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": "%s" % self.per3.pk,
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post(reverse('admin:admin_views_person_changelist') + '?gender__exact=1', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "%s" % self.per1.pk,
"form-0-gender": "1",
"_save": "Save",
}
self.client.post(reverse('admin:admin_views_person_changelist') + '?q=john', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
def test_non_field_errors(self):
        ''' Ensure that non-field errors are displayed for each of the
        forms in the changelist's formset. Refs #13126.
        '''
fd1 = FoodDelivery.objects.create(reference='123', driver='bill', restaurant='thai')
fd2 = FoodDelivery.objects.create(reference='456', driver='bill', restaurant='india')
fd3 = FoodDelivery.objects.create(reference='789', driver='bill', restaurant='pizza')
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "pizza",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_fooddelivery_changelist'), data)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
'with this Driver and Restaurant already exists.</li></ul></td></tr>',
1,
html=True
)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
            # Same driver/restaurant pair as above: also forbidden by unique_together.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_fooddelivery_changelist'), data)
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery '
'with this Driver and Restaurant already exists.</li></ul></td></tr>',
2,
html=True
)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "%s" % self.per2.pk,
"form-0-alive": "1",
"form-0-gender": "2",
# Ensure that the form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_person_changelist'), data)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "%s" % self.per2.pk,
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_person_changelist'), data)
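        # non_form_errors() holds formset-level errors raised by the formset's
        # clean(), as opposed to errors attached to an individual form.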
non_form_errors = response.context['cl'].formset.non_form_errors()
self.assertIsInstance(non_form_errors, ErrorList)
self.assertEqual(str(non_form_errors), str(ErrorList(["Grace is not a Zombie"])))
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# Ensure that the form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_category_changelist'), data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# Check that the order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
"""
Ensure that pagination works for list_editable items.
Refs #16819.
"""
UnorderedObject.objects.create(id=1, name='Unordered object #1')
UnorderedObject.objects.create(id=2, name='Unordered object #2')
UnorderedObject.objects.create(id=3, name='Unordered object #3')
response = self.client.get(reverse('admin:admin_views_unorderedobject_changelist'))
self.assertContains(response, 'Unordered object #3')
self.assertContains(response, 'Unordered object #2')
self.assertNotContains(response, 'Unordered object #1')
response = self.client.get(reverse('admin:admin_views_unorderedobject_changelist') + '?p=1')
self.assertNotContains(response, 'Unordered object #3')
self.assertNotContains(response, 'Unordered object #2')
self.assertContains(response, 'Unordered object #1')
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": ['3'],
"action": ['', 'delete_selected'],
}
self.client.post(reverse('admin:admin_views_person_changelist'), data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "%s" % self.per1.pk,
"form-1-gender": "2",
"form-1-id": "%s" % self.per2.pk,
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "%s" % self.per3.pk,
"_save": "Save",
"_selected_action": ['1'],
"action": ['', 'delete_selected'],
}
self.client.post(reverse('admin:admin_views_person_changelist'), data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
def test_list_editable_popup(self):
"""
Fields should not be list-editable in popups.
"""
response = self.client.get(reverse('admin:admin_views_person_changelist'))
self.assertNotEqual(response.context['cl'].list_editable, ())
response = self.client.get(reverse('admin:admin_views_person_changelist') + '?%s' % IS_POPUP_VAR)
self.assertEqual(response.context['cl'].list_editable, ())
def test_pk_hidden_fields(self):
""" Ensure that hidden pk fields aren't displayed in the table body and
that their corresponding human-readable value is displayed instead.
        Note that the hidden pk fields are in fact displayed, but
        separately (not in the table) and only once.
Refs #12475.
"""
story1 = Story.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
story2 = Story.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
response = self.client.get(reverse('admin:admin_views_story_changelist'))
self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id), html=True)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
""" Similarly as test_pk_hidden_fields, but when the hidden pk fields are
referenced in list_display_links.
Refs #12475.
"""
story1 = OtherStory.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
story2 = OtherStory.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
link1 = reverse('admin:admin_views_otherstory_change', args=(story1.pk,))
link2 = reverse('admin:admin_views_otherstory_change', args=(story2.pk,))
response = self.client.get(reverse('admin:admin_views_otherstory_changelist'))
self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id), html=True)
self.assertContains(response, '<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id), 1)
self.assertContains(response, '<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id), 1)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminSearchTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.per1 = Person.objects.create(name='John Mauchly', gender=1, alive=True)
cls.per2 = Person.objects.create(name='Grace Hopper', gender=1, alive=False)
cls.per3 = Person.objects.create(name='Guido van Rossum', gender=1, alive=True)
cls.t1 = Recommender.objects.create()
cls.t2 = Recommendation.objects.create(recommender=cls.t1)
cls.t3 = Recommender.objects.create()
cls.t4 = Recommendation.objects.create(recommender=cls.t3)
cls.tt1 = TitleTranslation.objects.create(title=cls.t1, text='Bar')
cls.tt2 = TitleTranslation.objects.create(title=cls.t2, text='Foo')
cls.tt3 = TitleTranslation.objects.create(title=cls.t3, text='Few')
cls.tt4 = TitleTranslation.objects.create(title=cls.t4, text='Bas')
def setUp(self):
self.client.login(username='super', password='secret')
def test_search_on_sibling_models(self):
"Check that a search that mentions sibling models"
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=bar')
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""
Ensure that the to_field GET parameter is preserved when a search
is performed. Refs #10918.
"""
response = self.client.get(reverse('admin:auth_user_changelist') + '?q=joe&%s=id' % TO_FIELD_VAR)
self.assertContains(response, "\n1 user\n")
self.assertContains(response, '<input type="hidden" name="%s" value="id"/>' % TO_FIELD_VAR, html=True)
def test_exact_matches(self):
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=bar')
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=ba')
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get(reverse('admin:admin_views_person_changelist') + '?q=Gui')
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get(reverse('admin:admin_views_person_changelist') + '?q=uido')
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get(reverse('admin:admin_views_pluggablesearchperson_changelist') + '?q=Bob')
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get(reverse('admin:admin_views_pluggablesearchperson_changelist') + '?q=20')
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get(reverse('admin:admin_views_person_changelist') + '?q=Gui')
self.assertContains(response,
"""<span class="small quiet">1 result (<a href="?">3 total</a>)</span>""",
html=True)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get(reverse('admin:admin_views_recommendation_changelist') + '?q=bar')
self.assertContains(response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True)
self.assertTrue(response.context['cl'].show_admin_actions)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminInheritedInlinesTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_inline(self):
"Ensure that inline models which inherit from a common parent are correctly handled by admin."
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
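        # Collect every form field name from the raw response bytes; the
        # non-greedy match stops at the closing quote of each name attribute.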
        # test the add case: GET the add form
response = self.client.get(reverse('admin:admin_views_persona_add'))
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
        # test the add case: POST the form data
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post(reverse('admin:admin_views_persona_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get(reverse('admin:admin_views_persona_change', args=(persona_id,)))
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": "Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": "1",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": str(foo_id),
"accounts-0-persona": str(persona_id),
"accounts-2-TOTAL_FORMS": "2",
"accounts-2-INITIAL_FORMS": "1",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": str(bar_id),
"accounts-2-0-persona": str(persona_id),
}
response = self.client.post(reverse('admin:admin_views_persona_change', args=(persona_id,)), post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminActionsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = ExternalSubscriber.objects.create(name='John Doe', email='john@example.org')
cls.s2 = Subscriber.objects.create(name='Max Mustermann', email='max@example.org')
def setUp(self):
self.client.login(username='super', password='secret')
def test_model_admin_custom_action(self):
"Tests a custom action defined in a ModelAdmin method"
action_data = {
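            # ACTION_CHECKBOX_NAME ('_selected_action') carries the pks of the
            # rows selected for the action.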
ACTION_CHECKBOX_NAME: [1],
'action': 'mail_admin',
'index': 0,
}
self.client.post(reverse('admin:admin_views_subscriber_changelist'), action_data)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a ModelAdmin action')
def test_model_admin_default_delete_action(self):
"Tests the default delete action defined as a ModelAdmin method"
action_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action': 'delete_selected',
'index': 0,
}
delete_confirmation_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action': 'delete_selected',
'post': 'yes',
}
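        # The first POST renders the confirmation page; re-posting the same
        # action with post='yes' actually performs the deletion.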
confirmation = self.client.post(reverse('admin:admin_views_subscriber_changelist'), action_data)
self.assertIsInstance(confirmation, TemplateResponse)
self.assertContains(confirmation, "Are you sure you want to delete the selected subscribers?")
self.assertContains(confirmation, "<h2>Summary</h2>")
self.assertContains(confirmation, "<li>Subscribers: 3</li>")
self.assertContains(confirmation, "<li>External subscribers: 1</li>")
self.assertContains(confirmation, ACTION_CHECKBOX_NAME, count=2)
self.client.post(reverse('admin:admin_views_subscriber_changelist'), delete_confirmation_data)
self.assertEqual(Subscriber.objects.count(), 0)
@override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
def test_non_localized_pk(self):
"""If USE_THOUSAND_SEPARATOR is set, make sure that the ids for
the objects selected for deletion are rendered without separators.
Refs #14895.
"""
subscriber = Subscriber.objects.get(id=1)
subscriber.id = 9999
subscriber.save()
action_data = {
ACTION_CHECKBOX_NAME: [9999, 2],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_subscriber_changelist'), action_data)
self.assertTemplateUsed(response, 'admin/delete_selected_confirmation.html')
self.assertContains(response, 'value="9999"') # Instead of 9,999
self.assertContains(response, 'value="2"')
def test_model_admin_default_delete_action_protected(self):
"""
Tests the default delete action defined as a ModelAdmin method in the
case where some related objects are protected from deletion.
"""
q1 = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q1, answer="Because.")
a2 = Answer.objects.create(question=q1, answer="Yes.")
q2 = Question.objects.create(question="Wherefore?")
action_data = {
ACTION_CHECKBOX_NAME: [q1.pk, q2.pk],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_question_changelist'), action_data)
self.assertContains(response, "would require deleting the following protected related objects")
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>' % reverse('admin:admin_views_answer_change', args=(a1.pk,)),
html=True
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>' % reverse('admin:admin_views_answer_change', args=(a2.pk,)),
html=True
)
def test_model_admin_default_delete_action_no_change_url(self):
"""
Default delete action shouldn't break if a user's ModelAdmin removes the url for change_view.
Regression test for #20640
"""
obj = UnchangeableObject.objects.create()
action_data = {
ACTION_CHECKBOX_NAME: obj.pk,
"action": "delete_selected",
"index": "0",
}
response = self.client.post(reverse('admin:admin_views_unchangeableobject_changelist'), action_data)
# No 500 caused by NoReverseMatch
self.assertEqual(response.status_code, 200)
# The page shouldn't display a link to the nonexistent change page
self.assertContains(response, "<li>Unchangeable object: UnchangeableObject object</li>", 1, html=True)
def test_custom_function_mail_action(self):
"Tests a custom action defined in a function"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'external_mail',
'index': 0,
}
self.client.post(reverse('admin:admin_views_externalsubscriber_changelist'), action_data)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a function action')
def test_custom_function_action_with_redirect(self):
"Tests a custom action defined in a function"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'redirect_to',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_externalsubscriber_changelist'), action_data)
self.assertEqual(response.status_code, 302)
def test_default_redirect(self):
"""
Test that actions which don't return an HttpResponse are redirected to
the same page, retaining the querystring (which may contain changelist
information).
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'external_mail',
'index': 0,
}
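        # 'o=1' is the changelist ordering parameter; the redirect after the
        # action should preserve this querystring.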
url = reverse('admin:admin_views_externalsubscriber_changelist') + '?o=1'
response = self.client.post(url, action_data)
self.assertRedirects(response, url)
def test_custom_function_action_streaming_response(self):
"""Tests a custom action that returns a StreamingHttpResponse."""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'download',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_externalsubscriber_changelist'), action_data)
content = b''.join(response.streaming_content)
self.assertEqual(content, b'This is the content of the file')
self.assertEqual(response.status_code, 200)
def test_custom_function_action_no_perm_response(self):
"""Tests a custom action that returns an HttpResponse with 403 code."""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'no_perm',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_externalsubscriber_changelist'), action_data)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b'No permission to perform this action')
def test_actions_ordering(self):
"""
Ensure that actions are ordered as expected.
Refs #15964.
"""
response = self.client.get(reverse('admin:admin_views_externalsubscriber_changelist'))
self.assertContains(response, '''<label>Action: <select name="action">
<option value="" selected="selected">---------</option>
<option value="delete_selected">Delete selected external
subscribers</option>
<option value="redirect_to">Redirect to (Awesome action)</option>
<option value="external_mail">External mail (Another awesome
action)</option>
<option value="download">Download subscription</option>
<option value="no_perm">No permission to run</option>
</select>''', html=True)
def test_model_without_action(self):
"Tests a ModelAdmin without any action"
response = self.client.get(reverse('admin:admin_views_oldsubscriber_changelist'))
self.assertEqual(response.context["action_form"], None)
        self.assertNotContains(response, '<input type="checkbox" class="action-select"',
                               msg_prefix="Found an unexpected action toggle checkbox in response")
def test_model_without_action_still_has_jquery(self):
"Tests that a ModelAdmin without any actions still gets jQuery included in page"
response = self.client.get(reverse('admin:admin_views_oldsubscriber_changelist'))
self.assertEqual(response.context["action_form"], None)
self.assertContains(response, 'jquery.min.js',
msg_prefix="jQuery missing from admin pages for model with no admin actions")
def test_action_column_class(self):
"Tests that the checkbox column class is present in the response"
response = self.client.get(reverse('admin:admin_views_subscriber_changelist'))
self.assertNotEqual(response.context["action_form"], None)
self.assertContains(response, 'action-checkbox-column')
def test_multiple_actions_form(self):
"""
Test that actions come from the form whose submit button was pressed (#10618).
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
# Two different actions selected on the two forms...
'action': ['external_mail', 'delete_selected'],
# ...but we clicked "go" on the top form.
'index': 0
}
self.client.post(reverse('admin:admin_views_externalsubscriber_changelist'), action_data)
# Send mail, don't delete.
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a function action')
def test_user_message_on_none_selected(self):
"""
User should see a warning when 'Go' is pressed and no items are selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_subscriber_changelist'), action_data)
msg = """Items must be selected in order to perform actions on them. No items have been changed."""
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_user_message_on_no_action(self):
"""
User should see a warning when 'Go' is pressed and no action is selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action': '',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_subscriber_changelist'), action_data)
msg = """No action selected."""
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_selection_counter(self):
"""
Check if the selection counter is there.
"""
response = self.client.get(reverse('admin:admin_views_subscriber_changelist'))
self.assertContains(response, '0 of 2 selected')
def test_popup_actions(self):
""" Actions should not be shown in popups. """
response = self.client.get(reverse('admin:admin_views_subscriber_changelist'))
self.assertNotEqual(response.context["action_form"], None)
response = self.client.get(
reverse('admin:admin_views_subscriber_changelist') + '?%s' % IS_POPUP_VAR)
self.assertEqual(response.context["action_form"], None)
def test_popup_template_response(self):
"""
        Success on popups shall be rendered from a template in order to allow
        easy customization.
"""
response = self.client.post(
reverse('admin:admin_views_actor_add') + '?%s=1' % IS_POPUP_VAR,
{'name': 'Troy McClure', 'age': '55', IS_POPUP_VAR: '1'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.template_name, 'admin/popup_response.html')
def test_popup_template_escaping(self):
context = {
'new_value': 'new_value\\',
'obj': 'obj\\',
'value': 'value\\',
}
output = render_to_string('admin/popup_response.html', context)
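        # '\u005C' is the escapejs encoding of the trailing backslash in each
        # value passed back to the opener window.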
self.assertIn(
'opener.dismissAddRelatedObjectPopup(window, "value\\u005C", "obj\\u005C");', output
)
context['action'] = 'change'
output = render_to_string('admin/popup_response.html', context)
self.assertIn(
'opener.dismissChangeRelatedObjectPopup(window, '
'"value\\u005C", "obj\\u005C", "new_value\\u005C");', output
)
context['action'] = 'delete'
output = render_to_string('admin/popup_response.html', context)
self.assertIn(
'opener.dismissDeleteRelatedObjectPopup(window, "value\\u005C");', output
)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class TestCustomChangeList(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": "First Gadget"}
response = self.client.post(reverse('admin:admin_views_gadget_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
# Hit the page once to get messages out of the queue message list
response = self.client.get(reverse('admin:admin_views_gadget_changelist'))
# Ensure that data is still not visible on the page
response = self.client.get(reverse('admin:admin_views_gadget_changelist'))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'First Gadget')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class TestInlineNotEditable(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def test_GET_parent_add(self):
"""
        A parent model's add view with a non-editable inline should render
        without errors (InlineModelAdmin regression check).
"""
response = self.client.get(reverse('admin:admin_views_parent_add'))
self.assertEqual(response.status_code, 200)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminCustomQuerysetTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
self.pks = [EmptyModel.objects.create().id for i in range(3)]
self.super_login = {
REDIRECT_FIELD_NAME: reverse('admin:index'),
'username': 'super',
'password': 'secret',
}
def test_changelist_view(self):
response = self.client.get(reverse('admin:admin_views_emptymodel_changelist'))
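        # Presumably the EmptyModel admin's custom get_queryset() hides the
        # lowest pks (see this test app's admin.py), hence the pk > 1 check.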
for i in self.pks:
if i > 1:
self.assertContains(response, 'Primary key = %s' % i)
else:
self.assertNotContains(response, 'Primary key = %s' % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name='person1', gender=1)
Person.objects.create(name='person2', gender=2)
changelist_url = reverse('admin:admin_views_person_changelist')
# 4 queries are expected: 1 for the session, 1 for the user,
# 1 for the count and 1 for the objects on the page
with self.assertNumQueries(4):
resp = self.client.get(changelist_url)
self.assertEqual(resp.context['selection_note'], '0 of 2 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 2 selected')
# here one more count(*) query will run, because filters were applied
with self.assertNumQueries(5):
extra = {'q': 'not_in_name'}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context['selection_note'], '0 of 0 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 0 selected')
with self.assertNumQueries(5):
extra = {'q': 'person'}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context['selection_note'], '0 of 2 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 2 selected')
with self.assertNumQueries(5):
extra = {'gender__exact': '1'}
resp = self.client.get(changelist_url, extra)
self.assertEqual(resp.context['selection_note'], '0 of 1 selected')
self.assertEqual(resp.context['selection_note_all'], '1 selected')
def test_change_view(self):
for i in self.pks:
response = self.client.get(reverse('admin:admin_views_emptymodel_change', args=(i,)))
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 404)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_coverletter_add'),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The cover letter "Candidate, Best" was added successfully.</li>',
html=True
)
# model has no __unicode__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_shortmessage_add'),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The short message "ShortMessage object" was added successfully.</li>',
html=True
)
def test_add_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
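# TelegramAdmin and PaperAdmin are assumed to restrict the selected columns
# with .only(...) in get_queryset(), analogous to the defer() sketch above.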
self.assertEqual(Telegram.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "Urgent telegram",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_telegram_add'),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The telegram "Urgent telegram" was added successfully.</li>',
html=True
)
# model has no __unicode__ method
self.assertEqual(Paper.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_paper_add'),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The paper "Paper object" was added successfully.</li>',
html=True
)
def test_edit_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
cl = CoverLetter.objects.create(author="John Doe")
self.assertEqual(CoverLetter.objects.count(), 1)
response = self.client.get(reverse('admin:admin_views_coverletter_change', args=(cl.pk,)))
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_coverletter_change', args=(cl.pk,)),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by the model's __unicode__()
self.assertContains(
response,
'<li class="success">The cover letter "John Doe II" was changed successfully.</li>',
html=True
)
# model has no __unicode__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get(reverse('admin:admin_views_shortmessage_change', args=(sm.pk,)))
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_shortmessage_change', args=(sm.pk,)),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by six.text_type()
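# (defer() creates a dynamic proxy subclass named
# "<Model>_Deferred_<deferred field>", so without a __unicode__ method the
# default "<class name> object" representation leaks into the message.)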
self.assertContains(
response,
'<li class="success">The short message "ShortMessage_Deferred_timestamp object" was changed successfully.</li>',
html=True
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
t = Telegram.objects.create(title="Frist Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get(reverse('admin:admin_views_telegram_change', args=(t.pk,)))
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_telegram_change', args=(t.pk,)),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by the model's __unicode__()
self.assertContains(
response,
'<li class="success">The telegram "Telegram without typo" was changed successfully.</li>',
html=True
)
# model has no __unicode__ method
p = Paper.objects.create(title="My Paper Title")
self.assertEqual(Paper.objects.count(), 1)
response = self.client.get(reverse('admin:admin_views_paper_change', args=(p.pk,)))
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post(reverse('admin:admin_views_paper_change', args=(p.pk,)),
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by six.text_type()
self.assertContains(
response,
'<li class="success">The paper "Paper_Deferred_author object" was changed successfully.</li>',
html=True
)
def test_history_view_custom_qs(self):
"""
Ensure that custom querysets are considered for the admin history view.
Refs #21013.
"""
self.client.post(reverse('admin:login'), self.super_login)
FilteredManager.objects.create(pk=1)
FilteredManager.objects.create(pk=2)
response = self.client.get(reverse('admin:admin_views_filteredmanager_changelist'))
self.assertContains(response, "PK=1")
self.assertContains(response, "PK=2")
self.assertEqual(
self.client.get(reverse('admin:admin_views_filteredmanager_history', args=(1,))).status_code, 200
)
self.assertEqual(
self.client.get(reverse('admin:admin_views_filteredmanager_history', args=(2,))).status_code, 200
)


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminInlineFileUploadTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
# Set up test Picture and Gallery.
# These must be set up here instead of in fixtures in order to allow Picture
# to use a NamedTemporaryFile.
file1 = tempfile.NamedTemporaryFile(suffix=".file1")
file1.write(b'a' * (2 ** 21))
filename = file1.name
file1.close()
self.gallery = Gallery(name="Test Gallery")
self.gallery.save()
self.picture = Picture(name="Test Picture", image=filename, gallery=self.gallery)
self.picture.save()
def test_inline_file_upload_edit_validation_error_post(self):
"""
Test that inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post(
reverse('admin:admin_views_gallery_change', args=(self.gallery.id,)), post_data
)
self.assertContains(response, b"Currently")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminInlineTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
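# The dict below follows the formset management-form convention: for each
# inline prefix, <prefix>-TOTAL_FORMS is the number of forms submitted,
# <prefix>-INITIAL_FORMS the number bound to existing objects, and
# <prefix>-MAX_NUM_FORMS mirrors the formset's max_num (assumed to be
# informational for client-side JavaScript; the server recomputes the limit).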
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
self.collector = Collector(pk=1, name='John Fowles')
self.collector.save()
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data['widget_set-0-name'] = "Widget 1"
collector_url = reverse('admin:admin_views_collector_change', args=(self.collector.pk,))
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# Now resave that inline
self.post_data['widget_set-INITIAL_FORMS'] = "1"
self.post_data['widget_set-0-id'] = str(widget_id)
self.post_data['widget_set-0-name'] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data['widget_set-INITIAL_FORMS'] = "1"
self.post_data['widget_set-0-id'] = str(widget_id)
self.post_data['widget_set-0-name'] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"A model with an explicit autofield primary key can be saved as inlines. Regression for #8093"
# First add a new inline
self.post_data['grommet_set-0-name'] = "Grommet 1"
collector_url = reverse('admin:admin_views_collector_change', args=(self.collector.pk,))
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data['grommet_set-INITIAL_FORMS'] = "1"
self.post_data['grommet_set-0-code'] = str(Grommet.objects.all()[0].code)
self.post_data['grommet_set-0-name'] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data['grommet_set-INITIAL_FORMS'] = "1"
self.post_data['grommet_set-0-code'] = str(Grommet.objects.all()[0].code)
self.post_data['grommet_set-0-name'] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1"
collector_url = reverse('admin:admin_views_collector_change', args=(self.collector.pk,))
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1"
collector_url = reverse('admin:admin_views_collector_change', args=(self.collector.pk,))
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
collector_url = reverse('admin:admin_views_collector_change', args=(self.collector.pk,))
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
self.post_data['fancydoodad_set-0-doodad_ptr'] = str(doodad_pk)
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
self.post_data['fancydoodad_set-0-doodad_ptr'] = str(doodad_pk)
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""Check that an inline with an editable ordering fields is
updated correctly. Regression for #10922"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update({
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
})
collector_url = reverse('admin:admin_views_collector_change', args=(self.collector.pk,))
response = self.client.post(collector_url, self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# Check that the order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class NeverCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
def setUp(self):
self.client.login(username='super', password='secret')
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get(reverse('admin:index'))
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get(reverse('admin:app_list', args=('admin_views',)))
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get(reverse('admin:admin_views_fabric_changelist'))
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get(reverse('admin:admin_views_fabric_add'))
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get(reverse('admin:admin_views_section_change', args=(self.s1.pk,)))
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get(reverse('admin:admin_views_section_history', args=(self.s1.pk,)))
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get(reverse('admin:admin_views_section_delete', args=(self.s1.pk,)))
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get(reverse('admin:index'))
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.get(reverse('admin:logout'))
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get(reverse('admin:password_change'))
self.assertEqual(get_max_age(response), None)
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get(reverse('admin:password_change_done'))
self.assertEqual(get_max_age(response), None)
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get(reverse('admin:jsi18n'))
self.assertEqual(get_max_age(response), None)


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class PrePopulatedTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
def setUp(self):
self.client.login(username='super', password='secret')
def test_prepopulated_on(self):
response = self.client.get(reverse('admin:admin_views_prepopulatedpost_add'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "id: '#id_slug',")
self.assertContains(response, "field['dependency_ids'].push('#id_title');")
self.assertContains(response, "id: '#id_prepopulatedsubpost_set-0-subslug',")
def test_prepopulated_off(self):
response = self.client.get(reverse('admin:admin_views_prepopulatedpost_change', args=(self.p1.pk,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, "id: '#id_slug'")
self.assertNotContains(response, "field['dependency_ids'].push('#id_title');")
self.assertNotContains(response, "id: '#id_prepopulatedsubpost_set-0-subslug',")
@override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
def test_prepopulated_maxlength_localized(self):
"""
Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure
that maxLength (in the JavaScript) is rendered without separators.
"""
response = self.client.get(reverse('admin:admin_views_prepopulatedpostlargeslug_add'))
self.assertContains(response, "maxLength: 1000") # instead of 1,000
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class SeleniumAdminViewsFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_views'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
self.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
self.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
def test_prepopulated_fields(self):
"""
Ensure that the JavaScript-automated prepopulated fields work with the
main form and with stacked and tabular inlines.
Refs #13068, #9264, #9983, #9784.
"""
self.admin_login(username='super', password='secret', login_url=reverse('admin:index'))
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_views_mainprepopulated_add')))
# Main form ----------------------------------------------------------
self.selenium.find_element_by_css_selector('#id_pubdate').send_keys('2012-02-18')
self.get_select_option('#id_status', 'option two').click()
self.selenium.find_element_by_css_selector('#id_name').send_keys(' this is the mAin nÀMë and it\'s awεšomeııı')
slug1 = self.selenium.find_element_by_css_selector('#id_slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_slug2').get_attribute('value')
slug3 = self.selenium.find_element_by_css_selector('#id_slug3').get_attribute('value')
self.assertEqual(slug1, 'main-name-and-its-awesomeiii-2012-02-18')
self.assertEqual(slug2, 'option-two-main-name-and-its-awesomeiii')
self.assertEqual(slug3, 'main-n\xe0m\xeb-and-its-aw\u03b5\u0161ome\u0131\u0131\u0131')
# Stacked inlines ----------------------------------------------------
# Initial inline
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-pubdate').send_keys('2011-12-17')
self.get_select_option('#id_relatedprepopulated_set-0-status', 'option one').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-name').send_keys(' here is a sŤāÇkeð inline ! ')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-slug2').get_attribute('value')
self.assertEqual(slug1, 'here-stacked-inline-2011-12-17')
self.assertEqual(slug2, 'option-one-here-stacked-inline')
# Add an inline
self.selenium.find_elements_by_link_text('Add another Related prepopulated')[0].click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-pubdate').send_keys('1999-01-25')
self.get_select_option('#id_relatedprepopulated_set-1-status', 'option two').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-name').send_keys(' now you haVe anöther sŤāÇkeð inline with a very ... loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog text... ')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-slug2').get_attribute('value')
self.assertEqual(slug1, 'now-you-have-another-stacked-inline-very-loooooooo') # 50 characters maximum for slug1 field
self.assertEqual(slug2, 'option-two-now-you-have-another-stacked-inline-very-looooooo') # 60 characters maximum for slug2 field
# Tabular inlines ----------------------------------------------------
# Initial inline
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-pubdate').send_keys('1234-12-07')
self.get_select_option('#id_relatedprepopulated_set-2-0-status', 'option two').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-name').send_keys('And now, with a tÃbűlaŘ inline !!!')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-slug2').get_attribute('value')
self.assertEqual(slug1, 'and-now-tabular-inline-1234-12-07')
self.assertEqual(slug2, 'option-two-and-now-tabular-inline')
# Add an inline
self.selenium.find_elements_by_link_text('Add another Related prepopulated')[1].click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-pubdate').send_keys('1981-08-22')
self.get_select_option('#id_relatedprepopulated_set-2-1-status', 'option one').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-name').send_keys('a tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-slug2').get_attribute('value')
self.assertEqual(slug1, 'tabular-inline-ignored-characters-1981-08-22')
self.assertEqual(slug2, 'option-one-tabular-inline-ignored-characters')
# Save and check that everything is properly stored in the database
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.assertEqual(MainPrepopulated.objects.all().count(), 1)
MainPrepopulated.objects.get(
name=' this is the mAin nÀMë and it\'s awεšomeııı',
pubdate='2012-02-18',
status='option two',
slug1='main-name-and-its-awesomeiii-2012-02-18',
slug2='option-two-main-name-and-its-awesomeiii',
)
self.assertEqual(RelatedPrepopulated.objects.all().count(), 4)
RelatedPrepopulated.objects.get(
name=' here is a sŤāÇkeð inline ! ',
pubdate='2011-12-17',
status='option one',
slug1='here-stacked-inline-2011-12-17',
slug2='option-one-here-stacked-inline',
)
RelatedPrepopulated.objects.get(
name=' now you haVe anöther sŤāÇkeð inline with a very ... loooooooooooooooooo', # 75 characters in name field
pubdate='1999-01-25',
status='option two',
slug1='now-you-have-another-stacked-inline-very-loooooooo',
slug2='option-two-now-you-have-another-stacked-inline-very-looooooo',
)
RelatedPrepopulated.objects.get(
name='And now, with a tÃbűlaŘ inline !!!',
pubdate='1234-12-07',
status='option two',
slug1='and-now-tabular-inline-1234-12-07',
slug2='option-two-and-now-tabular-inline',
)
RelatedPrepopulated.objects.get(
name='a tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters',
pubdate='1981-08-22',
status='option one',
slug1='tabular-inline-ignored-characters-1981-08-22',
slug2='option-one-tabular-inline-ignored-characters',
)
def test_populate_existing_object(self):
"""
Ensure that the prepopulation works for existing objects too, as long
as the original field is empty.
Refs #19082.
"""
# Slugs are empty to start with.
item = MainPrepopulated.objects.create(
name=' this is the mAin nÀMë',
pubdate='2012-02-18',
status='option two',
slug1='',
slug2='',
)
self.admin_login(username='super',
password='secret',
login_url=reverse('admin:index'))
object_url = '%s%s' % (
self.live_server_url,
reverse('admin:admin_views_mainprepopulated_change', args=(item.id,)))
self.selenium.get(object_url)
self.selenium.find_element_by_css_selector('#id_name').send_keys(' the best')
# The slugs got prepopulated since they were originally empty
slug1 = self.selenium.find_element_by_css_selector('#id_slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_slug2').get_attribute('value')
self.assertEqual(slug1, 'main-name-best-2012-02-18')
self.assertEqual(slug2, 'option-two-main-name-best')
# Save the object
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.selenium.get(object_url)
self.selenium.find_element_by_css_selector('#id_name').send_keys(' hello')
# The slugs didn't change since they were no longer empty
slug1 = self.selenium.find_element_by_css_selector('#id_slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_slug2').get_attribute('value')
self.assertEqual(slug1, 'main-name-best-2012-02-18')
self.assertEqual(slug2, 'option-two-main-name-best')
def test_collapsible_fieldset(self):
"""
Test that the 'collapse' class in a fieldset definition allows
showing/hiding the appropriate field section.
"""
self.admin_login(username='super', password='secret', login_url=reverse('admin:index'))
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_views_article_add')))
self.assertFalse(self.selenium.find_element_by_id('id_title').is_displayed())
self.selenium.find_elements_by_link_text('Show')[0].click()
self.assertTrue(self.selenium.find_element_by_id('id_title').is_displayed())
self.assertEqual(
self.selenium.find_element_by_id('fieldsetcollapser0').text,
"Hide"
)
def test_first_field_focus(self):
"""JavaScript-assisted auto-focus on first usable form field."""
# First form field has a single widget
self.admin_login(username='super', password='secret', login_url=reverse('admin:index'))
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_views_picture_add')))
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element_by_id('id_name')
)
# First form field has a MultiWidget
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_views_reservation_add')))
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element_by_id('id_start_date_0')
)
def test_cancel_delete_confirmation(self):
"Cancelling the deletion of an object takes the user back one page."
pizza = Pizza.objects.create(name="Double Cheese")
url = reverse('admin:admin_views_pizza_change', args=(pizza.id,))
full_url = '%s%s' % (self.live_server_url, url)
self.admin_login(username='super', password='secret', login_url=reverse('admin:index'))
self.selenium.get(full_url)
self.selenium.find_element_by_class_name('deletelink').click()
# Wait until we're on the delete page.
self.wait_for('.cancel-link')
self.selenium.find_element_by_class_name('cancel-link').click()
# Wait until we're back on the change page.
self.wait_for_text('#content h1', 'Change pizza')
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
def test_cancel_delete_related_confirmation(self):
"""
Cancelling the deletion of an object with relations takes the user back
one page.
"""
pizza = Pizza.objects.create(name="Double Cheese")
topping1 = Topping.objects.create(name="Cheddar")
topping2 = Topping.objects.create(name="Mozzarella")
pizza.toppings.add(topping1, topping2)
url = reverse('admin:admin_views_pizza_change', args=(pizza.id,))
full_url = '%s%s' % (self.live_server_url, url)
self.admin_login(username='super', password='secret', login_url=reverse('admin:index'))
self.selenium.get(full_url)
self.selenium.find_element_by_class_name('deletelink').click()
# Wait until we're on the delete page.
self.wait_for('.cancel-link')
self.selenium.find_element_by_class_name('cancel-link').click()
# Wait until we're back on the change page.
self.wait_for_text('#content h1', 'Change pizza')
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
self.assertEqual(Topping.objects.count(), 2)


class SeleniumAdminViewsChromeTests(SeleniumAdminViewsFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'


class SeleniumAdminViewsIETests(SeleniumAdminViewsFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class ReadonlyTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_readonly_get(self):
response = self.client.get(reverse('admin:admin_views_post_add'))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'name="posted"')
# 3 fields + 2 submit buttons + 5 inline management form fields + 2
# hidden fields for inlines + 1 field for the inline + 2 empty form fields
self.assertContains(response, "<input", count=15)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response,
"<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Checks that multiline text in a readonly field gets <br /> tags
self.assertContains(response, "Multiline<br />test<br />string")
self.assertContains(response, "<p>Multiline<br />html<br />content</p>", html=True)
self.assertContains(response, "InlineMultiline<br />test<br />string")
self.assertContains(response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)))
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<p class="help">', 3)
self.assertContains(response, '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>', html=True)
self.assertContains(response, '<p class="help">Some help text for the content (with unicode ŠĐĆŽćžšđ)</p>', html=True)
self.assertContains(response, '<p class="help">Some help text for the date (with unicode ŠĐĆŽćžšđ)</p>', html=True)
p = Post.objects.create(title="I worked on readonly_fields", content="Its good stuff")
response = self.client.get(reverse('admin:admin_views_post_change', args=(p.pk,)))
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:admin_views_post_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post(reverse('admin:admin_views_post_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by('-id')[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get(reverse('admin:admin_views_pizza_add'))
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get(reverse('admin2:auth_user_password_change', args=(su.pk,)))
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
response = self.client.get(reverse('admin:admin_views_choice_change', args=(choice.pk,)))
self.assertContains(response, '<p>No opinion</p>', html=True)
self.assertNotContains(response, '<p>(None)</p>')
def test_readonly_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name='Salami')
pizza = Pizza.objects.create(name='Americano')
pizza.toppings.add(topping)
response = self.client.get(reverse('admin:admin_views_topping_add'))
self.assertEqual(response.status_code, 200)
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get(reverse('admin:admin_views_fieldoverridepost_change', args=(p.pk,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<p class="help">Overridden help text for the date</p>')
self.assertContains(response, '<label for="id_public">Overridden public label:</label>', html=True)
self.assertNotContains(response, "Some help text for the date (with unicode ŠĐĆŽćžšđ)")
def test_correct_autoescaping(self):
"""
Make sure that non-field readonly elements are properly autoescaped (#24461)
"""
section = Section.objects.create(name='<a>evil</a>')
response = self.client.get(reverse('admin:admin_views_section_change', args=(section.pk,)))
self.assertNotContains(response, "<a>evil</a>", status_code=200)
self.assertContains(response, "<a>evil</a>", status_code=200)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class LimitChoicesToInAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_limit_choices_to_as_callable(self):
"""Test for ticket 2445 changes to admin."""
threepwood = Character.objects.create(
username='threepwood',
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
marley = Character.objects.create(
username='marley',
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
response = self.client.get(reverse('admin:admin_views_stumpjoke_add'))
# The allowed option should appear twice; the limited option should not appear.
self.assertContains(response, threepwood.username, count=2)
self.assertNotContains(response, marley.username)


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class RawIdFieldsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_limit_choices_to(self):
"""Regression test for 14880"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True,
leader=actor,
country="England")
Inquisition.objects.create(expected=False,
leader=actor,
country="Spain")
response = self.client.get(reverse('admin:admin_views_sketch_add'))
# Find the link
m = re.search(br'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content)
self.assertTrue(m) # Got a match
popup_url = m.groups()[0].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request['PATH_INFO'], popup_url)
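# The Sketch model's inquisition field is assumed to carry a composite
# limit_choices_to, e.g. (sketch):
#
#     inquisition = models.ForeignKey(
#         Inquisition,
#         limit_choices_to={
#             'leader__name': 'Palin',  # string filter
#             'leader__age': 27,        # integer filter
#             'expected': False,        # boolean filter
#         },
#     )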
# Get the popup and verify the correct objects show up in the resulting
# page. This step also tests integers, strings and booleans in the
# lookup query string; in model we define inquisition field to have a
# limit_choices_to option that includes a filter on a string field
# (inquisition__actor__name), a filter on an integer field
# (inquisition__actor__age), and a filter on a boolean field
# (inquisition__expected).
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
def test_limit_choices_to_isnull_false(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse('admin:admin_views_sketch_add'))
# Find the link
m = re.search(br'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content)
self.assertTrue(m) # Got a match
popup_url = m.groups()[0].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request['PATH_INFO'], popup_url)
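# defendant0 is assumed to be declared with
# limit_choices_to={'actor__title__isnull': False} (sketch), so only actors
# holding a title should appear in the popup.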
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=0 gets parsed correctly from the
# lookup query string; in model we define defendant0 field to have a
# limit_choices_to option that includes "actor__title__isnull=False".
response2 = self.client.get(popup_url)
self.assertContains(response2, "Kilbraken")
self.assertNotContains(response2, "Palin")
def test_limit_choices_to_isnull_true(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get(reverse('admin:admin_views_sketch_add'))
# Find the link
m = re.search(br'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content)
self.assertTrue(m) # Got a match
popup_url = m.groups()[0].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request['PATH_INFO'], popup_url)
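# defendant1 is assumed to mirror defendant0 with
# limit_choices_to={'actor__title__isnull': True} (sketch).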
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=1 gets parsed correctly from the
# lookup query string; in model we define defendant1 field to have a
# limit_choices_to option that includes "actor__title__isnull=True".
response2 = self.client.get(popup_url)
self.assertNotContains(response2, "Kilbraken")
self.assertContains(response2, "Palin")
def test_list_display_method_same_name_as_reverse_accessor(self):
"""
Should be able to use a ModelAdmin method in list_display that has the
same name as a reverse model field ("sketch" in this case).
"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True, leader=actor, country="England")
response = self.client.get(reverse('admin:admin_views_inquisition_changelist'))
self.assertContains(response, 'list-display-sketch')


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u2 = User.objects.create(
id=101, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='adduser',
first_name='Add', last_name='User', email='auser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u3 = User.objects.create(
id=102, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='changeuser',
first_name='Change', last_name='User', email='cuser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u4 = User.objects.create(
id=103, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='deleteuser',
first_name='Delete', last_name='User', email='duser@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u6 = User.objects.create(
id=106, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='nostaff',
first_name='No', last_name='Staff', email='nostaff@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
cls.per1 = Person.objects.create(name='John Mauchly', gender=1, alive=True)
cls.per2 = Person.objects.create(name='Grace Hopper', gender=1, alive=False)
cls.per3 = Person.objects.create(name='Guido van Rossum', gender=1, alive=True)
def setUp(self):
self.client.login(username='super', password='secret')
def test_save_button(self):
user_count = User.objects.count()
response = self.client.post(reverse('admin:auth_user_add'), {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
})
new_user = User.objects.get(username='newuser')
self.assertRedirects(response, reverse('admin:auth_user_change', args=(new_user.pk,)))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post(reverse('admin:auth_user_add'), {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_continue': '1',
})
new_user = User.objects.get(username='newuser')
self.assertRedirects(response, reverse('admin:auth_user_change', args=(new_user.pk,)))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_password_mismatch(self):
response = self.client.post(reverse('admin:auth_user_add'), {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'mismatch',
})
self.assertEqual(response.status_code, 200)
adminform = response.context['adminform']
self.assertNotIn('password', adminform.form.errors)
self.assertEqual(adminform.form.errors['password2'],
["The two password fields didn't match."])
def test_user_fk_add_popup(self):
"""User addition through a FK popup should return the appropriate JavaScript response."""
response = self.client.get(reverse('admin:admin_views_album_add'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('admin:auth_user_add'))
self.assertContains(response, 'class="related-widget-wrapper-link add-related" id="add_id_owner"')
response = self.client.get(reverse('admin:auth_user_add') + '?_popup=1')
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_popup': '1',
'_save': '1',
}
response = self.client.post(reverse('admin:auth_user_add') + '?_popup=1', data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddRelatedObjectPopup')
def test_user_fk_change_popup(self):
"""User change through a FK popup should return the appropriate JavaScript response."""
response = self.client.get(reverse('admin:admin_views_album_add'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('admin:auth_user_change', args=('__fk__',)))
self.assertContains(response, 'class="related-widget-wrapper-link change-related" id="change_id_owner"')
user = User.objects.get(username='changeuser')
url = reverse('admin:auth_user_change', args=(user.pk,)) + '?_popup=1'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'last_login_0': '2007-05-30',
'last_login_1': '13:20:10',
'date_joined_0': '2007-05-30',
'date_joined_1': '13:20:10',
'_popup': '1',
'_save': '1',
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissChangeRelatedObjectPopup')
def test_user_fk_delete_popup(self):
"""User deletion through a FK popup should return the appropriate JavaScript response."""
response = self.client.get(reverse('admin:admin_views_album_add'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('admin:auth_user_delete', args=('__fk__',)))
self.assertContains(response, 'class="related-widget-wrapper-link change-related" id="change_id_owner"')
user = User.objects.get(username='changeuser')
url = reverse('admin:auth_user_delete', args=(user.pk,)) + '?_popup=1'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = {
'post': 'yes',
'_popup': '1',
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissDeleteRelatedObjectPopup')
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post(reverse('admin:auth_user_add'), {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_addanother': '1',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, reverse('admin:auth_user_add'))
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_user_permission_performance(self):
u = User.objects.all()[0]
# Don't depend on a warm cache, see #17377.
ContentType.objects.clear_cache()
with self.assertNumQueries(10):
response = self.client.get(reverse('admin:auth_user_change', args=(u.pk,)))
self.assertEqual(response.status_code, 200)
def test_form_url_present_in_context(self):
u = User.objects.all()[0]
response = self.client.get(reverse('admin3:auth_user_password_change', args=(u.pk,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['form_url'], 'pony')


@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class GroupAdminTest(TestCase):
"""
Tests group CRUD functionality.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post(reverse('admin:auth_group_add'), {
'name': 'newgroup',
})
Group.objects.order_by('-id')[0]
self.assertRedirects(response, reverse('admin:auth_group_changelist'))
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
with self.assertNumQueries(8):
response = self.client.get(reverse('admin:auth_group_change', args=(g.pk,)))
self.assertEqual(response.status_code, 200)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class CSSTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = Section.objects.create(name='Test section')
cls.a1 = Article.objects.create(
content='<p>Middle content</p>', date=datetime.datetime(2008, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a2 = Article.objects.create(
content='<p>Oldest content</p>', date=datetime.datetime(2000, 3, 18, 11, 54, 58), section=cls.s1
)
cls.a3 = Article.objects.create(
content='<p>Newest content</p>', date=datetime.datetime(2009, 3, 18, 11, 54, 58), section=cls.s1
)
cls.p1 = PrePopulatedPost.objects.create(title='A Long Title', published=True, slug='a-long-title')
def setUp(self):
self.client.login(username='super', password='secret')
def test_field_prefix_css_classes(self):
"""
Ensure that fields have a CSS class name with a 'field-' prefix.
Refs #16371.
"""
response = self.client.get(reverse('admin:admin_views_post_add'))
# The main form
self.assertContains(response, 'class="form-row field-title"')
self.assertContains(response, 'class="form-row field-content"')
self.assertContains(response, 'class="form-row field-public"')
self.assertContains(response, 'class="form-row field-awesomeness_level"')
self.assertContains(response, 'class="form-row field-coolness"')
self.assertContains(response, 'class="form-row field-value"')
self.assertContains(response, 'class="form-row"') # The lambda function
# The tabular inline
self.assertContains(response, '<td class="field-url">')
self.assertContains(response, '<td class="field-posted">')
def test_index_css_classes(self):
"""
Ensure that CSS class names are used for each app and model on the
admin index pages.
Refs #17050.
"""
# General index page
response = self.client.get(reverse('admin:index'))
self.assertContains(response, '<div class="app-admin_views module">')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
# App index page
response = self.client.get(reverse('admin:app_list', args=('admin_views',)))
self.assertContains(response, '<div class="app-admin_views module">')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
def test_app_model_in_form_body_class(self):
"""
Ensure app and model tag are correctly read by change_form template
"""
response = self.client.get(reverse('admin:admin_views_section_add'))
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_app_model_in_list_body_class(self):
"""
Ensure app and model tag are correctly read by change_list template
"""
response = self.client.get(reverse('admin:admin_views_section_changelist'))
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_app_model_in_delete_confirmation_body_class(self):
"""
Ensure app and model tag are correctly read by delete_confirmation
template
"""
response = self.client.get(
reverse('admin:admin_views_section_delete', args=(self.s1.pk,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_app_model_in_app_index_body_class(self):
"""
Ensure app and model tag are correctly read by app_index template
"""
response = self.client.get(reverse('admin:app_list', args=('admin_views',)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<body class=" dashboard app-admin_views')
def test_app_model_in_delete_selected_confirmation_body_class(self):
"""
Ensure app and model tag are correctly read by
delete_selected_confirmation template
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_section_changelist'),
action_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_changelist_field_classes(self):
"""
Cells of the change list table should contain the field name in their class attribute
Refs #11195.
"""
Podcast.objects.create(name="Django Dose",
release_date=datetime.date.today())
response = self.client.get(reverse('admin:admin_views_podcast_changelist'))
self.assertContains(
response, '<th class="field-name">')
self.assertContains(
response, '<td class="field-release_date nowrap">')
self.assertContains(
response, '<td class="action-checkbox">')
try:
import docutils
except ImportError:
docutils = None
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.admindocs', 'django.contrib.flatpages']})
class AdminDocsTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_tags(self):
response = self.client.get(reverse('django-admindocs-tags'))
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2, html=True)
# A builtin tag exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-autoescape">autoescape</h3>', html=True)
self.assertContains(response, '<li><a href="#built_in-autoescape">autoescape</a></li>', html=True)
# An app tag exists in both the index and detail
self.assertContains(response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>', html=True)
self.assertContains(response, '<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>', html=True)
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2, html=True)
# An admin list tag exists in both the index and detail
self.assertContains(response, '<h3 id="admin_list-admin_actions">admin_actions</h3>', html=True)
self.assertContains(response, '<li><a href="#admin_list-admin_actions">admin_actions</a></li>', html=True)
def test_filters(self):
response = self.client.get(reverse('django-admindocs-filters'))
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2, html=True)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>', html=True)
self.assertContains(response, '<li><a href="#built_in-add">add</a></li>', html=True)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}],
USE_I18N=False,
)
class ValidXHTMLTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_lang_name_present(self):
response = self.client.get(reverse('admin:app_list', args=('admin_views',)))
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls",
USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
class DateHierarchyTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
formats.reset_format_cache()
def assert_non_localized_year(self, response, year):
"""Ensure that the year is not localized with
USE_THOUSAND_SEPARATOR. Refs #15234.
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % (date.year,))
def assert_contains_month_link(self, response, date):
self.assertContains(
response, '?release_date__month=%d&release_date__year=%d"' % (
date.month, date.year))
def assert_contains_day_link(self, response, date):
self.assertContains(
response, '?release_date__day=%d&'
'release_date__month=%d&release_date__year=%d"' % (
date.day, date.month, date.year))
def test_empty(self):
"""
Ensure that no date hierarchy links display with empty changelist.
"""
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
self.assertNotContains(response, 'release_date__year=')
self.assertNotContains(response, 'release_date__month=')
self.assertNotContains(response, 'release_date__day=')
def test_single(self):
"""
Ensure that single day-level date hierarchy appears for single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
self.assert_contains_day_link(response, DATE)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
Ensure that day-level links appear for changelist within single month.
"""
DATES = (datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
for date in DATES:
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
Ensure that month-level links appear for changelist within single year.
"""
DATES = (datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, 'release_date__day=')
for date in DATES:
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
Ensure that year-level links appear for year-spanning changelist.
"""
DATES = (datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
# no day/month-level links
self.assertNotContains(response, 'release_date__day=')
self.assertNotContains(response, 'release_date__month=')
for date in DATES:
self.assert_contains_year_link(response, date)
# and make sure GET parameters still behave correctly
for date in DATES:
url = '%s?release_date__year=%d' % (
reverse('admin:admin_views_podcast_changelist'),
date.year)
response = self.client.get(url)
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
url = '%s?release_date__year=%d&release_date__month=%d' % (
reverse('admin:admin_views_podcast_changelist'),
date.year, date.month)
response = self.client.get(url)
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminCustomSaveRelatedTests(TestCase):
"""
Ensure that one can easily customize the way related objects are saved.
Refs #16115.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_should_be_able_to_edit_related_objects_on_add_view(self):
post = {
'child_set-TOTAL_FORMS': '3',
'child_set-INITIAL_FORMS': '0',
'name': 'Josh Stone',
'child_set-0-name': 'Paul',
'child_set-1-name': 'Catherine',
}
self.client.post(reverse('admin:admin_views_parent_add'), post)
self.assertEqual(1, Parent.objects.count())
self.assertEqual(2, Child.objects.count())
children_names = list(Child.objects.order_by('name').values_list('name', flat=True))
self.assertEqual('Josh Stone', Parent.objects.latest('id').name)
self.assertEqual(['Catherine Stone', 'Paul Stone'], children_names)
def test_should_be_able_to_edit_related_objects_on_change_view(self):
parent = Parent.objects.create(name='Josh Stone')
paul = Child.objects.create(parent=parent, name='Paul')
catherine = Child.objects.create(parent=parent, name='Catherine')
post = {
'child_set-TOTAL_FORMS': '5',
'child_set-INITIAL_FORMS': '2',
'name': 'Josh Stone',
'child_set-0-name': 'Paul',
'child_set-0-id': paul.id,
'child_set-1-name': 'Catherine',
'child_set-1-id': catherine.id,
}
self.client.post(reverse('admin:admin_views_parent_change', args=(parent.id,)), post)
children_names = list(Child.objects.order_by('name').values_list('name', flat=True))
self.assertEqual('Josh Stone', Parent.objects.latest('id').name)
self.assertEqual(['Catherine Stone', 'Paul Stone'], children_names)
def test_should_be_able_to_edit_related_objects_on_changelist_view(self):
parent = Parent.objects.create(name='Josh Rock')
Child.objects.create(parent=parent, name='Paul')
Child.objects.create(parent=parent, name='Catherine')
post = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': parent.id,
'form-0-name': 'Josh Stone',
'_save': 'Save'
}
self.client.post(reverse('admin:admin_views_parent_changelist'), post)
children_names = list(Child.objects.order_by('name').values_list('name', flat=True))
self.assertEqual('Josh Stone', Parent.objects.latest('id').name)
self.assertEqual(['Catherine Stone', 'Paul Stone'], children_names)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewLogoutTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_client_logout_url_can_be_used_to_login(self):
response = self.client.get(reverse('admin:logout'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/logged_out.html')
self.assertEqual(response.request['PATH_INFO'], reverse('admin:logout'))
# we are now logged out
response = self.client.get(reverse('admin:logout'))
self.assertEqual(response.status_code, 302) # we should be redirected to the login page.
# follow the redirect and test results.
response = self.client.get(reverse('admin:logout'), follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(response.request['PATH_INFO'], reverse('admin:login'))
self.assertContains(response, '<input type="hidden" name="next" value="%s" />' % reverse('admin:index'))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminUserMessageTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def send_message(self, level):
"""
Helper that sends a post to the dummy test methods and asserts that a
message with the level has appeared in the response.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'message_%s' % level,
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_usermessenger_changelist'),
action_data, follow=True)
self.assertContains(response,
'<li class="%s">Test %s</li>' % (level, level),
html=True)
@override_settings(MESSAGE_LEVEL=10) # Set to DEBUG for this request
def test_message_debug(self):
self.send_message('debug')
def test_message_info(self):
self.send_message('info')
def test_message_success(self):
self.send_message('success')
def test_message_warning(self):
self.send_message('warning')
def test_message_error(self):
self.send_message('error')
def test_message_extra_tags(self):
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'message_extra_tags',
'index': 0,
}
response = self.client.post(reverse('admin:admin_views_usermessenger_changelist'),
action_data, follow=True)
self.assertContains(response,
'<li class="extra_tag info">Test tags</li>',
html=True)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminKeepChangeListFiltersTests(TestCase):
admin_site = site
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.u5 = User.objects.create(
id=104, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=False, username='joepublic',
first_name='Joe', last_name='Public', email='joepublic@example.com',
is_staff=False, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def assertURLEqual(self, url1, url2):
"""
Assert that two URLs are equal despite the ordering
of their querystring. Refs #22360.
"""
parsed_url1 = urlparse(url1)
path1 = parsed_url1.path
parsed_qs1 = dict(parse_qsl(parsed_url1.query))
parsed_url2 = urlparse(url2)
path2 = parsed_url2.path
parsed_qs2 = dict(parse_qsl(parsed_url2.query))
for parsed_qs in [parsed_qs1, parsed_qs2]:
if '_changelist_filters' in parsed_qs:
changelist_filters = parsed_qs['_changelist_filters']
parsed_filters = dict(parse_qsl(changelist_filters))
parsed_qs['_changelist_filters'] = parsed_filters
self.assertEqual(path1, path2)
self.assertEqual(parsed_qs1, parsed_qs2)
def test_assert_url_equal(self):
# Test equality.
change_user_url = reverse('admin:auth_user_change', args=(self.u5.pk,))
self.assertURLEqual(
'http://testserver{}?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'.format(change_user_url),
'http://testserver{}?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'.format(change_user_url)
)
# Test inequality.
with self.assertRaises(AssertionError):
self.assertURLEqual(
'http://testserver{}?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'.format(change_user_url),
'http://testserver{}?_changelist_filters=is_staff__exact%3D1%26is_superuser__exact%3D1'.format(change_user_url)
)
# Ignore scheme and host.
self.assertURLEqual(
'http://testserver{}?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'.format(change_user_url),
'{}?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'.format(change_user_url)
)
# Ignore ordering of querystring.
self.assertURLEqual(
'{}?is_staff__exact=0&is_superuser__exact=0'.format(reverse('admin:auth_user_changelist')),
'{}?is_superuser__exact=0&is_staff__exact=0'.format(reverse('admin:auth_user_changelist'))
)
# Ignore ordering of _changelist_filters.
self.assertURLEqual(
'{}?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'.format(change_user_url),
'{}?_changelist_filters=is_superuser__exact%3D0%26is_staff__exact%3D0'.format(change_user_url)
)
def get_changelist_filters(self):
return {
'is_superuser__exact': 0,
'is_staff__exact': 0,
}
def get_changelist_filters_querystring(self):
return urlencode(self.get_changelist_filters())
def get_preserved_filters_querystring(self):
return urlencode({
'_changelist_filters': self.get_changelist_filters_querystring()
})
def get_sample_user_id(self):
return self.u5.pk
def get_changelist_url(self):
return '%s?%s' % (
reverse('admin:auth_user_changelist',
current_app=self.admin_site.name),
self.get_changelist_filters_querystring(),
)
def get_add_url(self):
return '%s?%s' % (
reverse('admin:auth_user_add',
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def get_change_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse('admin:auth_user_change', args=(user_id,),
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def get_history_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse('admin:auth_user_history', args=(user_id,),
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def get_delete_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse('admin:auth_user_delete', args=(user_id,),
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, 200)
# Check the `change_view` link has the correct querystring.
detail_link = re.search(
'<a href="(.*?)">{}</a>'.format(self.u5.username),
force_text(response.content)
)
self.assertURLEqual(detail_link.group(1), self.get_change_url())
def test_change_view(self):
# Get the `change_view`.
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form enctype="multipart/form-data" action="(.*?)" method="post" id="user_form".*?>',
force_text(response.content)
)
self.assertURLEqual(form_action.group(1), '?%s' % self.get_preserved_filters_querystring())
# Check the history link.
history_link = re.search(
'<a href="(.*?)" class="historylink">History</a>',
force_text(response.content)
)
self.assertURLEqual(history_link.group(1), self.get_history_url())
# Check the delete link.
delete_link = re.search(
'<a href="(.*?)" class="deletelink">Delete</a>',
force_text(response.content)
)
self.assertURLEqual(delete_link.group(1), self.get_delete_url())
# Test redirect on "Save".
post_data = {
'username': 'joepublic',
'last_login_0': '2007-05-30',
'last_login_1': '13:20:10',
'date_joined_0': '2007-05-30',
'date_joined_1': '13:20:10',
}
post_data['_save'] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_changelist_url()
)
post_data.pop('_save')
# Test redirect on "Save and continue".
post_data['_continue'] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_change_url()
)
post_data.pop('_continue')
# Test redirect on "Save and add new".
post_data['_addanother'] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_add_url()
)
post_data.pop('_addanother')
def test_add_view(self):
# Get the `add_view`.
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form enctype="multipart/form-data" action="(.*?)" method="post" id="user_form".*?>',
force_text(response.content)
)
self.assertURLEqual(form_action.group(1), '?%s' % self.get_preserved_filters_querystring())
post_data = {
'username': 'dummy',
'password1': 'test',
'password2': 'test',
}
# Test redirect on "Save".
post_data['_save'] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_change_url(User.objects.get(username='dummy').pk)
)
post_data.pop('_save')
# Test redirect on "Save and continue".
post_data['username'] = 'dummy2'
post_data['_continue'] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_change_url(User.objects.get(username='dummy2').pk)
)
post_data.pop('_continue')
# Test redirect on "Save and add new".
post_data['username'] = 'dummy3'
post_data['_addanother'] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_add_url()
)
post_data.pop('_addanother')
def test_delete_view(self):
# Test redirect on "Delete".
response = self.client.post(self.get_delete_url(), {'post': 'yes'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_changelist_url()
)
def test_url_prefix(self):
context = {
'preserved_filters': self.get_preserved_filters_querystring(),
'opts': User._meta,
}
url = reverse('admin:auth_user_changelist', current_app=self.admin_site.name)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
with override_script_prefix('/prefix/'):
url = reverse('admin:auth_user_changelist', current_app=self.admin_site.name)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
admin_site = site2
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class TestLabelVisibility(TestCase):
""" #11277 -Labels of hidden fields in admin were not hidden. """
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_all_fields_visible(self):
response = self.client.get(reverse('admin:admin_views_emptymodelvisible_add'))
self.assert_fieldline_visible(response)
self.assert_field_visible(response, 'first')
self.assert_field_visible(response, 'second')
def test_all_fields_hidden(self):
response = self.client.get(reverse('admin:admin_views_emptymodelhidden_add'))
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, 'first')
self.assert_field_hidden(response, 'second')
def test_mixin(self):
response = self.client.get(reverse('admin:admin_views_emptymodelmixin_add'))
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, 'first')
self.assert_field_visible(response, 'second')
def assert_field_visible(self, response, field_name):
self.assertContains(response, '<div class="field-box field-%s">' % field_name)
def assert_field_hidden(self, response, field_name):
self.assertContains(response, '<div class="field-box field-%s hidden">' % field_name)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class AdminViewOnSiteTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = State.objects.create(name='New York')
cls.s2 = State.objects.create(name='Illinois')
cls.s3 = State.objects.create(name='California')
cls.c1 = City.objects.create(state=cls.s1, name='New York')
cls.c2 = City.objects.create(state=cls.s2, name='Chicago')
cls.c3 = City.objects.create(state=cls.s3, name='San Francisco')
cls.r1 = Restaurant.objects.create(city=cls.c1, name='Italian Pizza')
cls.r2 = Restaurant.objects.create(city=cls.c1, name='Boulevard')
cls.r3 = Restaurant.objects.create(city=cls.c2, name='Chinese Dinner')
cls.r4 = Restaurant.objects.create(city=cls.c2, name='Angels')
cls.r5 = Restaurant.objects.create(city=cls.c2, name='Take Away')
cls.r6 = Restaurant.objects.create(city=cls.c3, name='The Unknown Restaurant')
cls.w1 = Worker.objects.create(work_at=cls.r1, name='Mario', surname='Rossi')
cls.w2 = Worker.objects.create(work_at=cls.r1, name='Antonio', surname='Bianchi')
cls.w3 = Worker.objects.create(work_at=cls.r1, name='John', surname='Doe')
def setUp(self):
self.client.login(username='super', password='secret')
def test_add_view_form_and_formsets_run_validation(self):
"""
        Issue #20522: if the parent form fails validation, the inlines should
        also run validation, even when that validation is contingent on parent
        form data.
"""
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {"family_name": "Test1",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": "",
"dependentchild_set-0-family_name": "Test2"}
response = self.client.post(reverse('admin:admin_views_parentwithdependentchildren_add'),
post_data)
# just verifying the parent form failed validation, as expected --
# this isn't the regression test
self.assertIn('some_required_info', response.context['adminform'].form.errors)
# actual regression test
for error_set in response.context['inline_admin_formset'].formset.errors:
self.assertEqual(['Children must share a family name with their parents in this contrived test case'],
error_set.get('__all__'))
def test_change_view_form_and_formsets_run_validation(self):
"""
        Issue #20522: if the parent form fails validation, the inlines should
        also run validation, even when that validation is contingent on parent
        form data.
"""
pwdc = ParentWithDependentChildren.objects.create(some_required_info=6,
family_name="Test1")
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {"family_name": "Test2",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": str(pwdc.id),
"dependentchild_set-0-family_name": "Test1"}
response = self.client.post(
reverse('admin:admin_views_parentwithdependentchildren_change', args=(pwdc.id,)), post_data
)
# just verifying the parent form failed validation, as expected --
# this isn't the regression test
self.assertIn('some_required_info', response.context['adminform'].form.errors)
# actual regression test
for error_set in response.context['inline_admin_formset'].formset.errors:
self.assertEqual(['Children must share a family name with their parents in this contrived test case'],
error_set.get('__all__'))
def test_check(self):
"Ensure that the view_on_site value is either a boolean or a callable"
try:
CityAdmin.view_on_site = True
self.assertEqual(CityAdmin.check(City), [])
CityAdmin.view_on_site = False
self.assertEqual(CityAdmin.check(City), [])
CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
self.assertEqual(CityAdmin.check(City), [])
CityAdmin.view_on_site = []
self.assertEqual(CityAdmin.check(City), [
Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
hint=None,
obj=CityAdmin,
id='admin.E025',
),
])
finally:
# Restore the original values for the benefit of other tests.
CityAdmin.view_on_site = True
def test_false(self):
"Ensure that the 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(reverse('admin:admin_views_restaurant_change', args=(self.r1.pk,)))
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertNotContains(response, reverse('admin:view_on_site', args=(content_type_pk, 1)))
def test_true(self):
"Ensure that the default behavior is followed if view_on_site is True"
response = self.client.get(reverse('admin:admin_views_city_change', args=(self.c1.pk,)))
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertContains(response, reverse('admin:view_on_site', args=(content_type_pk, self.c1.pk)))
def test_callable(self):
"Ensure that the right link is displayed if view_on_site is a callable"
response = self.client.get(reverse('admin:admin_views_worker_change', args=(self.w1.pk,)))
self.assertContains(response, '"/worker/%s/%s/"' % (self.w1.surname, self.w1.name))
def test_missing_get_absolute_url(self):
"Ensure None is returned if model doesn't have get_absolute_url"
model_admin = ModelAdmin(Worker, None)
self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
cls.s1 = State.objects.create(name='New York')
cls.s2 = State.objects.create(name='Illinois')
cls.s3 = State.objects.create(name='California')
cls.c1 = City.objects.create(state=cls.s1, name='New York')
cls.c2 = City.objects.create(state=cls.s2, name='Chicago')
cls.c3 = City.objects.create(state=cls.s3, name='San Francisco')
cls.r1 = Restaurant.objects.create(city=cls.c1, name='Italian Pizza')
cls.r2 = Restaurant.objects.create(city=cls.c1, name='Boulevard')
cls.r3 = Restaurant.objects.create(city=cls.c2, name='Chinese Dinner')
cls.r4 = Restaurant.objects.create(city=cls.c2, name='Angels')
cls.r5 = Restaurant.objects.create(city=cls.c2, name='Take Away')
cls.r6 = Restaurant.objects.create(city=cls.c3, name='The Unknown Restaurant')
cls.w1 = Worker.objects.create(work_at=cls.r1, name='Mario', surname='Rossi')
cls.w2 = Worker.objects.create(work_at=cls.r1, name='Antonio', surname='Bianchi')
cls.w3 = Worker.objects.create(work_at=cls.r1, name='John', surname='Doe')
def setUp(self):
self.client.login(username='super', password='secret')
def test_false(self):
"Ensure that the 'View on site' button is not displayed if view_on_site is False"
response = self.client.get(reverse('admin:admin_views_state_change', args=(self.s1.pk,)))
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertNotContains(response, reverse('admin:view_on_site', args=(content_type_pk, self.c1.pk)))
def test_true(self):
"Ensure that the 'View on site' button is displayed if view_on_site is True"
response = self.client.get(reverse('admin:admin_views_city_change', args=(self.c1.pk,)))
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertContains(response, reverse('admin:view_on_site', args=(content_type_pk, self.r1.pk)))
def test_callable(self):
"Ensure that the right link is displayed if view_on_site is a callable"
response = self.client.get(reverse('admin:admin_views_restaurant_change', args=(self.r1.pk,)))
self.assertContains(response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name))
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestEtagWithAdminView(SimpleTestCase):
# See https://code.djangoproject.com/ticket/16003
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get(reverse('admin:index'))
self.assertEqual(response.status_code, 302)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get(reverse('admin:index'))
self.assertEqual(response.status_code, 302)
self.assertTrue(response.has_header('ETag'))
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_views.urls",
)
class GetFormsetsWithInlinesArgumentTest(TestCase):
"""
#23934 - When adding a new model instance in the admin, the 'obj' argument
of get_formsets_with_inlines() should be None. When changing, it should be
equal to the existing model instance.
The GetFormsetsArgumentCheckingAdmin ModelAdmin throws an exception
if obj is not None during add_view or obj is None during change_view.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='super@example.com',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
def test_explicitly_provided_pk(self):
post_data = {'name': '1'}
response = self.client.post(reverse('admin:admin_views_explicitlyprovidedpk_add'), post_data)
self.assertEqual(response.status_code, 302)
post_data = {'name': '2'}
response = self.client.post(reverse('admin:admin_views_explicitlyprovidedpk_change', args=(1,)), post_data)
self.assertEqual(response.status_code, 302)
def test_implicitly_generated_pk(self):
post_data = {'name': '1'}
response = self.client.post(reverse('admin:admin_views_implicitlygeneratedpk_add'), post_data)
self.assertEqual(response.status_code, 302)
post_data = {'name': '2'}
response = self.client.post(reverse('admin:admin_views_implicitlygeneratedpk_change', args=(1,)), post_data)
self.assertEqual(response.status_code, 302)
|
zanderle/django
|
tests/admin_views/tests.py
|
Python
|
bsd-3-clause
| 300,461
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""
Data Analysis RPC server over Tango:
Factory for the loading of plugins
"""
__authors__ = ["Jérôme Kieffer"]
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "17/03/2020"
__status__ = "production"
import os
import os.path as op
import logging
from collections import OrderedDict
logger = logging.getLogger("dahu.factory")
from threading import Semaphore
from .utils import get_workdir, fully_qualified_name
import importlib.util
def load_source(module_name, file_path):
    "Plugin loader which does not pollute sys.modules"
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    # Execute the module in isolation instead of using the deprecated
    # spec.loader.load_module(), which also registers it in sys.modules.
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
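# A quick usage sketch for load_source (hypothetical path; assumes the loaded
# file defines a callable named run):
#     mod = load_source("my_plugin", "/path/to/my_plugin.py")
#     mod.run()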
dahu_root = os.path.dirname(os.path.abspath(__file__))
class Factory(object):
"""
    This is a factory; it instantiates a plugin from its name.
"""
registry = {}
modules = {}
plugin_dirs = OrderedDict() # key: directory name, value=list of modules
reg_sem = Semaphore()
def __init__(self, workdir=None, plugin_path=None):
"""
        @param workdir: place where we are allowed to write
        @param plugin_path: places where plugins live, in addition to the content of DAHU_PLUGINS
"""
self._sem = Semaphore()
self.workdir = workdir or "."
# First the ones from environment
if "DAHU_PLUGINS" in os.environ:
for directory in os.environ["DAHU_PLUGINS"].split(os.pathsep):
self.add_directory(directory)
# Then the ones from configuration
for directory in (plugin_path or []):
self.add_directory(directory)
# Finally the default one
self.add_directory(os.path.join(dahu_root, "plugins"))
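        # Resulting search order, as a sketch (assuming DAHU_PLUGINS=/opt/plugins
        # in the environment and plugin_path=["/srv/plugins"]):
        #     /opt/plugins, /srv/plugins, <dahu_root>/plugins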
def add_directory(self, directory):
abs_dir = os.path.abspath(directory)
if not os.path.isdir(directory):
logger.warning("No such directory: %s" % directory)
return
python_files = []
for i in os.listdir(abs_dir):
j = op.join(abs_dir, i)
if op.isfile(j) and i.endswith(".py"):
python_files.append(i[:-3])
if op.isdir(j) and op.exists(op.join(j, "__init__.py")):
python_files.append(i)
logger.info(f"Available modules in dahu from {directory}:{os.linesep}" + " ".join(python_files))
with self._sem:
self.plugin_dirs[abs_dir] = python_files
def search_plugin(self, plugin_name):
"""
        Search for a given plugin, starting from its fully qualified name
        (package.class).
"""
if "." not in plugin_name:
logger.error("plugin name have to be fully qualified, here: %s" % plugin_name)
return
splitted = plugin_name.split(".")
module_name = ".".join(splitted[:-1])
for dirname, modules in self.plugin_dirs.items():
if module_name in modules and module_name not in self.modules:
                dst = op.join(dirname, module_name)
                if op.isdir(dst):
                    fname = op.join(dst, "__init__.py")
                elif op.isfile(dst + ".py"):
                    fname = dst + ".py"
                else:
                    raise RuntimeError("Unable to find module source for %s in %s" % (module_name, dirname))
                logger.info("load %s from %s", module_name, fname)
                # fname is already an absolute path; joining it with dirname again is redundant
                mod = load_source(module_name, fname)
with self.reg_sem:
self.modules[module_name] = mod
def __call__(self, plugin_name):
"""
create a plugin instance from its name
@param plugin_name: name of the plugin as a string
@return: plugin instance
"""
plugin_name = plugin_name.lower()
if plugin_name in self.registry:
return self.registry[plugin_name]()
with self._sem:
self.search_plugin(plugin_name)
if plugin_name not in self.registry:
logger.error("Plugin directories have been searched but plugin"
" %s was not found" % plugin_name)
else:
return self.registry[plugin_name]()
@classmethod
def register(cls, klass, fqn=None):
"""
        Register a class as a plugin which can be instantiated.
        This can be used as a decorator:
            @plugin_factory.register
        @param klass: class to be registered as a plugin
        @param fqn: fully qualified name
        @return: klass
"""
if fqn is None:
fqn = fully_qualified_name(klass)
logger.debug("Registering plugin %s as %s" % (klass, fqn))
with cls.reg_sem:
cls.registry[fqn] = klass
return klass
plugin_factory = Factory(get_workdir())
register = plugin_factory.register
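# A minimal registration/instantiation sketch (hypothetical plugin; the
# registry key is whatever fully_qualified_name(klass) computes, assumed here
# to be "my_plugins.dummyplugin"):
#
#     from dahu.plugin import Plugin
#
#     @register
#     class DummyPlugin(Plugin):
#         def process(self):
#             self.output["answer"] = 42
#
#     instance = plugin_factory("my_plugins.dummyplugin")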
|
kif/dahu
|
dahu/factory.py
|
Python
|
gpl-2.0
| 5,151
|
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return len(set(nums)) != len(nums)
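# Quick sanity check: a set drops duplicates, so a length mismatch means at
# least one value occurred more than once.
#     Solution().containsDuplicate([1, 2, 3, 1])  # -> True
#     Solution().containsDuplicate([1, 2, 3, 4])  # -> False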
|
tedye/leetcode
|
Python/leetcode.217.contains-duplicate.py
|
Python
|
mit
| 180
|
#!/usr/bin/env python
import re
import players
from player import Player
re_coordinates = re.compile(r"^\(?(\d+)\s*,\s*(\d+)\)?$")
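# The pattern accepts optionally parenthesised pairs such as "3,7" or "(3, 7)";
# group(1) is x and group(2) is y, e.g.:
#     re_coordinates.match("(3, 7)").groups()  # -> ('3', '7')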
def clientThread(c, addr):
player = createCharacter(c, addr)
c.send(str(len(players.onlinePlayers))+" player(s) currently online...\n")
loop(c, addr, player)
def loop(c, addr, player):
command = ""
while(command != "quit"):
c.send("\n" * 20) # Crude way of clearing the screen
c.send(player.currentArea.display(players.onlinePlayers))
c.send("> ")
command = c.recv(1024).strip()
print "Recieved command from:", addr
parseCommand(command, c, addr, player)
def parseCommand(command, c, addr, player):
# Set player position
match = re_coordinates.match(command)
if(match):
x = int(match.group(1))
y = int(match.group(2))
player.setPosition(x, y)
c.send("Setting your position to x: "+str(x)+" y: "+str(y))
else:
c.send("Unknown command. Type 'help' for a list of commands.")
def createCharacter(c, addr):
c.send("Name: ")
name = c.recv(1024).strip()
c.send("Race: ")
race = c.recv(1024).strip()
c.send("Creating player...\n")
player = Player(name, race)
return player
|
icedvariables/QuickMUD
|
src/clientthread.py
|
Python
|
gpl-3.0
| 1,154
|
from OpenGL.GL import (
    GL_TRUE, GL_FRAGMENT_SHADER, GL_LINK_STATUS, GL_VERTEX_SHADER,
    GL_COMPILE_STATUS, glAttachShader, glCompileShader, glCreateProgram,
    glCreateShader, glDeleteProgram, glDeleteShader, glGetAttribLocation,
    glGetProgramInfoLog, glGetProgramiv, glGetShaderInfoLog, glGetShaderiv,
    glGetUniformLocation, glLinkProgram, glShaderSource, glUseProgram)
# glBindBuffer, glEnableVertexAttribArray, glGetString, glVertexAttribPointer, GL_SHADING_LANGUAGE_VERSION
class JuMEG_TSV_GLSL(object):
""" Helper class for using GLSL shader programs
"""
def __init__(self, vertex, fragment):
"""
Parameters
----------
vertex : str
String containing shader source code for the vertex
shader
fragment : str
String containing shader source code for the fragment
shader
"""
self.program_id = glCreateProgram()
self.id_vertex = self.add_shader(vertex, GL_VERTEX_SHADER)
self.id_frag = self.add_shader(fragment, GL_FRAGMENT_SHADER)
glAttachShader(self.program_id, self.id_vertex)
glAttachShader(self.program_id, self.id_frag)
glLinkProgram(self.program_id)
if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE:
info = glGetProgramInfoLog(self.program_id)
glDeleteProgram(self.program_id)
glDeleteShader(self.id_vertex)
glDeleteShader(self.id_frag)
raise RuntimeError('Error linking program: %s' % (info))
glDeleteShader(self.id_vertex)
glDeleteShader(self.id_frag)
def add_shader(self, source, shader_type):
""" Helper function for compiling a GLSL shader
Parameters
----------
source : str
String containing shader source code
shader_type : valid OpenGL shader type
Type of shader to compile
Returns
-------
value : int
Identifier for shader if compilation is successful
"""
        shader_id = glCreateShader(shader_type)
        try:
            glShaderSource(shader_id, source)
            glCompileShader(shader_id)
            if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE:
                info = glGetShaderInfoLog(shader_id)
                raise RuntimeError('Shader compilation failed: %s' % (info))
            return shader_id
        except Exception:
            # Creating the shader outside the try block guarantees shader_id
            # is bound here, so cleanup cannot raise a NameError.
            glDeleteShader(shader_id)
            raise
    def uloc(self, n):
        return glGetUniformLocation(self.program_id, n)
    def aloc(self, n):
        return glGetAttribLocation(self.program_id, n)
def uniform_location(self, name):
""" Helper function to get location of an OpenGL uniform variable
Parameters
----------
name : str
Name of the variable for which location is to be returned
Returns
-------
value : int
Integer describing location
"""
return glGetUniformLocation(self.program_id, name)
def attribute_location(self, name):
""" Helper function to get location of an OpenGL attribute variable
Parameters
----------
name : str
Name of the variable for which location is to be returned
Returns
-------
value : int
Integer describing location
"""
return glGetAttribLocation(self.program_id, name)
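# A minimal usage sketch (requires a current OpenGL context, e.g. from a Qt or
# GLUT window; the shader sources below are illustrative only):
#
#     VERT = "void main() { gl_Position = ftransform(); }"
#     FRAG = "void main() { gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); }"
#     prog = JuMEG_TSV_GLSL(VERT, FRAG)
#     glUseProgram(prog.program_id)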
|
fboers/jumegX
|
tsvgl/old/jumeg_tsv_glsl.py
|
Python
|
bsd-3-clause
| 3,586
|
input = """
c num blocks = 1
c num vars = 250
c minblockids[0] = 1
c maxblockids[0] = 250
p cnf 250 1082
230 -158 -213 0
-139 -140 -202 0
-191 -160 20 0
195 -137 -183 0
176 -64 34 0
61 30 247 0
30 -236 -247 0
207 193 -21 0
50 63 23 0
-217 -180 163 0
97 -13 89 0
111 218 171 0
-28 -89 41 0
29 180 -94 0
-51 35 -53 0
126 -190 -171 0
-228 132 210 0
-97 127 193 0
-32 117 134 0
-200 14 122 0
78 -233 38 0
7 227 249 0
86 -53 126 0
133 109 223 0
-127 206 69 0
115 184 12 0
31 -51 -241 0
-124 -148 175 0
239 66 32 0
-31 46 -214 0
-134 165 -197 0
236 200 -29 0
137 -82 102 0
32 -239 60 0
-145 8 96 0
238 39 -210 0
-152 -154 -48 0
153 69 68 0
-15 180 -187 0
90 35 241 0
171 -83 -199 0
-37 198 123 0
203 17 -25 0
-110 133 -114 0
-57 225 134 0
206 13 -84 0
181 150 185 0
9 182 -10 0
-48 -11 26 0
-179 171 -67 0
197 -54 191 0
170 -31 204 0
-183 31 -2 0
232 153 45 0
-179 82 77 0
-91 -234 -127 0
171 187 -80 0
173 -210 -152 0
35 -191 194 0
106 78 -129 0
227 -190 154 0
85 -230 -164 0
76 139 -187 0
-239 162 150 0
-19 -35 -44 0
105 33 53 0
216 -126 -8 0
-233 242 -146 0
-139 114 118 0
126 91 62 0
-177 -187 -82 0
-221 -30 23 0
169 125 -1 0
-214 41 -45 0
-217 183 -134 0
142 -98 -207 0
52 -147 156 0
-159 216 212 0
179 -201 -166 0
23 243 131 0
-162 216 -118 0
80 78 107 0
6 -123 33 0
99 -194 113 0
246 52 60 0
-28 118 -227 0
-114 112 -154 0
-101 -57 -23 0
-124 -199 -221 0
-133 -47 -25 0
51 -136 -86 0
160 69 -135 0
62 -178 -14 0
-150 -244 15 0
-141 -98 -12 0
165 -3 189 0
126 31 85 0
101 56 -174 0
20 237 -146 0
-140 -131 -97 0
45 -184 -134 0
-211 235 -122 0
89 248 161 0
225 -245 -207 0
209 -117 -118 0
233 -115 -153 0
188 161 116 0
165 -224 50 0
-240 -121 -120 0
-49 99 179 0
247 -23 -68 0
169 -194 60 0
-116 -184 59 0
-199 -9 -61 0
203 29 -121 0
-106 119 -208 0
-57 -20 43 0
37 -175 9 0
196 -150 158 0
20 -46 -244 0
-44 180 -62 0
10 -2 -170 0
78 203 211 0
146 -49 66 0
-1 214 -125 0
-93 -184 228 0
-192 228 -204 0
-30 141 -76 0
50 82 -167 0
-78 173 -177 0
-5 23 -93 0
203 -76 -145 0
-51 221 125 0
187 -142 -66 0
-34 -100 -220 0
-169 21 224 0
45 219 204 0
-231 -113 -19 0
-144 -26 -233 0
-14 -102 34 0
243 -67 -9 0
-99 160 219 0
-221 -154 73 0
12 23 -140 0
33 61 -236 0
-166 -42 160 0
-192 118 -176 0
-244 61 218 0
-5 -134 106 0
209 220 92 0
24 -185 147 0
197 5 81 0
130 -224 -56 0
-27 -112 3 0
156 217 -88 0
-174 217 148 0
117 -94 172 0
6 -199 80 0
-172 198 169 0
232 10 -47 0
-61 202 -129 0
152 39 115 0
179 -229 98 0
-195 75 -139 0
169 165 149 0
-226 209 -79 0
-29 113 156 0
-107 -67 -80 0
-147 -26 -186 0
-92 -69 -248 0
-237 239 -111 0
119 -19 201 0
-67 -66 49 0
248 -127 -150 0
-247 8 69 0
-98 48 220 0
82 -145 57 0
185 172 100 0
-113 -155 -161 0
121 76 -235 0
-190 -168 84 0
156 -1 -26 0
1 113 158 0
229 -153 245 0
-84 -26 152 0
-187 -103 -232 0
190 -167 -84 0
60 29 145 0
-201 -86 33 0
210 122 39 0
68 -182 -212 0
-84 -156 44 0
126 -107 161 0
-99 -203 208 0
172 164 240 0
202 -213 191 0
125 108 -66 0
-236 157 -35 0
-209 -150 91 0
145 72 133 0
-29 -153 170 0
-47 238 -80 0
39 -90 -229 0
-218 -53 -234 0
41 229 33 0
243 -128 -13 0
-191 68 161 0
182 80 125 0
143 -40 -172 0
-97 -110 -187 0
16 -104 2 0
-32 -186 -185 0
174 -114 122 0
-120 -220 38 0
-183 58 -32 0
216 107 -241 0
163 -33 -97 0
134 -44 88 0
50 141 -45 0
-10 104 68 0
-193 95 65 0
17 150 160 0
-70 -142 6 0
-26 -61 -72 0
161 10 148 0
-160 41 237 0
231 99 148 0
86 -15 210 0
104 30 158 0
-18 -53 66 0
167 -233 -47 0
-37 -155 167 0
116 -13 29 0
-60 248 -10 0
97 -221 158 0
105 -147 19 0
213 171 121 0
6 154 -148 0
56 -179 33 0
-212 -204 155 0
-9 -177 120 0
-14 141 11 0
100 -145 165 0
217 -143 93 0
-106 163 -94 0
-117 -126 -37 0
-48 -94 16 0
-126 -214 138 0
220 203 15 0
132 -127 -157 0
223 -159 188 0
228 48 32 0
-192 133 102 0
153 -47 171 0
-220 -197 -227 0
-25 88 101 0
-14 -20 18 0
108 -175 -137 0
-237 118 101 0
-43 124 -187 0
-238 -57 222 0
-23 -16 250 0
104 28 -249 0
-9 -80 -245 0
-10 52 232 0
-95 -78 158 0
86 -61 -41 0
156 -153 -100 0
31 233 -143 0
-127 -185 -139 0
171 -112 -246 0
227 -197 -179 0
-16 95 30 0
-168 97 193 0
32 -146 137 0
135 -68 -62 0
34 -60 135 0
76 -110 -173 0
68 209 -35 0
121 -37 -244 0
155 127 10 0
-183 -155 -11 0
-56 -65 79 0
-10 -129 126 0
240 121 -179 0
228 -190 -235 0
-156 124 -233 0
-60 -220 -163 0
59 167 147 0
162 64 -111 0
5 -13 -118 0
-142 149 99 0
155 -24 -202 0
130 -118 -168 0
-23 237 -156 0
-102 -165 -96 0
-32 -210 -145 0
154 26 34 0
-242 -195 -101 0
-79 163 175 0
-155 -85 -40 0
72 -245 -137 0
-189 131 190 0
-238 -13 -143 0
-29 118 55 0
21 8 -221 0
149 17 -1 0
60 -167 38 0
81 -25 -41 0
212 -45 -3 0
-57 155 220 0
-219 53 61 0
-173 -139 -144 0
-233 26 128 0
-139 4 -33 0
-120 134 159 0
137 -207 -22 0
216 -176 195 0
-122 -11 195 0
145 41 -221 0
243 -244 -54 0
-242 -103 -95 0
3 29 -246 0
-14 111 -147 0
-5 27 -233 0
119 -213 56 0
-91 -120 176 0
-140 -101 184 0
62 -238 -127 0
-62 103 -246 0
171 13 -165 0
107 -47 -54 0
169 170 -153 0
36 -46 180 0
146 -79 221 0
149 171 133 0
-207 174 71 0
-114 190 155 0
127 -40 -18 0
126 -91 -207 0
24 -6 -89 0
173 90 216 0
-124 131 242 0
-176 -203 135 0
-181 -152 246 0
-34 111 -230 0
65 -146 209 0
-98 -19 -223 0
29 -217 -159 0
185 -125 -221 0
-6 -225 157 0
-168 -52 -250 0
191 -149 122 0
-250 26 -219 0
200 -114 140 0
241 226 -94 0
-88 101 216 0
-40 237 -157 0
-26 -244 -24 0
162 -37 214 0
-212 18 170 0
-86 -225 62 0
-36 -170 -241 0
-60 -227 174 0
-215 176 -160 0
-198 17 238 0
200 -110 -61 0
-21 70 180 0
-209 -111 25 0
181 169 -98 0
189 196 154 0
-144 17 215 0
-87 191 30 0
-80 174 -111 0
230 -182 24 0
-195 200 -193 0
-34 79 -134 0
-190 48 -116 0
-221 -216 143 0
-82 -135 163 0
202 -194 -31 0
6 163 89 0
-90 10 -182 0
-44 -126 129 0
-99 240 125 0
30 49 -65 0
128 236 176 0
-45 -239 -157 0
218 196 -185 0
-4 -192 -38 0
-109 -156 151 0
-61 84 -63 0
-189 129 132 0
121 83 176 0
-102 -3 178 0
148 4 -211 0
88 190 -158 0
191 -120 -235 0
237 88 -248 0
106 -17 -226 0
126 40 -152 0
-18 -22 20 0
-203 153 -31 0
5 -20 226 0
-99 -115 -67 0
228 -164 -16 0
85 133 114 0
165 22 250 0
225 237 -155 0
93 -56 76 0
-63 -150 -72 0
-62 -30 222 0
-95 181 47 0
102 31 -98 0
-234 110 -227 0
-240 65 -101 0
190 -25 186 0
4 153 -32 0
113 51 106 0
-157 143 26 0
-194 23 41 0
-141 -54 -30 0
-159 200 40 0
156 -12 215 0
37 -146 58 0
190 -7 112 0
209 51 -10 0
-38 -96 242 0
-156 -63 -189 0
-159 14 -222 0
76 -125 -45 0
110 -131 111 0
-228 89 -127 0
-141 55 139 0
72 202 -31 0
224 248 169 0
-50 76 -149 0
-185 170 32 0
-247 22 201 0
238 150 -169 0
-124 127 -30 0
-60 90 70 0
-93 150 -128 0
-33 -221 173 0
-103 180 35 0
66 -108 -61 0
48 120 107 0
134 -178 64 0
-109 130 -3 0
-33 223 201 0
72 -248 -234 0
-165 126 -146 0
-74 -172 -125 0
103 -83 -134 0
-66 53 156 0
-226 -192 -246 0
242 135 -222 0
-170 -240 -135 0
-183 111 185 0
152 -79 164 0
-17 -123 139 0
-71 -175 165 0
4 224 208 0
-202 -43 -108 0
183 -133 -54 0
36 -131 208 0
-234 29 -159 0
194 112 84 0
-69 140 -143 0
-34 -177 10 0
114 -154 247 0
-119 -66 -9 0
26 -149 146 0
-174 -6 169 0
58 31 170 0
82 183 -234 0
-157 50 -78 0
-150 -77 -140 0
-76 141 -166 0
-100 215 167 0
156 118 -126 0
-68 -184 -66 0
-191 169 -73 0
-114 56 179 0
175 47 225 0
210 45 124 0
-166 -56 159 0
120 -90 -174 0
195 -50 -75 0
84 -171 -107 0
-80 120 38 0
224 239 -200 0
121 209 -177 0
199 -81 -60 0
-86 -197 220 0
100 102 -229 0
197 19 -201 0
205 49 40 0
-170 122 -226 0
-198 -236 -54 0
-235 -203 159 0
-221 26 -111 0
-117 180 -72 0
212 -195 45 0
147 231 201 0
-172 -115 -35 0
-220 142 -202 0
-54 93 -166 0
-132 -209 -98 0
40 250 135 0
34 226 184 0
117 -245 -91 0
-145 245 201 0
96 20 11 0
-118 104 79 0
-211 172 226 0
-233 -209 123 0
205 -56 51 0
246 39 -6 0
85 -108 -202 0
-1 -60 70 0
157 -250 52 0
246 217 237 0
-126 -136 199 0
-194 146 -128 0
145 159 -48 0
-229 -89 118 0
132 126 20 0
134 -20 146 0
-89 -165 -34 0
-42 106 240 0
-68 207 123 0
45 215 -237 0
-107 120 102 0
-74 126 200 0
171 188 -236 0
-27 -99 -125 0
140 106 -104 0
-125 241 -98 0
-84 -241 -60 0
-230 -236 -50 0
-209 -248 217 0
245 -120 220 0
-175 -16 3 0
-98 120 -154 0
-151 185 -4 0
-95 -218 -69 0
109 -11 151 0
87 100 -96 0
3 246 -86 0
21 -139 -220 0
-144 -95 -83 0
-148 104 2 0
-190 -68 184 0
50 162 8 0
123 136 179 0
116 176 198 0
-194 88 66 0
-203 12 16 0
98 3 175 0
94 -3 -108 0
222 150 -235 0
-34 -127 -51 0
-22 161 88 0
-5 248 88 0
-104 -14 108 0
-187 -173 -230 0
-1 -91 153 0
106 -15 -171 0
-207 141 209 0
136 132 59 0
128 9 -22 0
-196 223 -210 0
-90 226 11 0
-78 -211 30 0
-184 237 211 0
200 63 55 0
34 81 -51 0
-55 182 -156 0
-125 -126 104 0
32 -87 -148 0
15 -10 -209 0
135 -130 133 0
146 -183 67 0
-138 201 150 0
-208 -249 173 0
18 -87 1 0
10 245 90 0
-46 177 41 0
155 -10 -63 0
-45 8 -122 0
-49 60 179 0
227 -118 250 0
175 -144 132 0
-184 87 -10 0
-111 60 -196 0
177 168 201 0
-138 -129 231 0
-250 -79 -187 0
2 -243 40 0
-197 235 -249 0
219 -139 -248 0
-14 -125 202 0
154 122 -172 0
185 12 38 0
5 139 -135 0
219 -102 12 0
-59 -244 -139 0
-198 226 21 0
141 186 78 0
-41 -95 -32 0
-249 -92 191 0
-7 135 -20 0
8 -223 -182 0
112 -18 248 0
189 178 -129 0
24 -231 9 0
93 240 111 0
107 136 161 0
120 -124 84 0
237 96 -247 0
111 -183 -247 0
111 231 -105 0
-145 234 -9 0
-217 79 -118 0
150 -90 -32 0
-126 153 199 0
141 46 -138 0
222 -18 -17 0
-151 -74 -218 0
-34 158 166 0
244 -62 -17 0
-107 -17 -27 0
33 -119 -81 0
18 -91 -11 0
239 -9 84 0
34 -207 33 0
178 -179 -52 0
-165 134 -117 0
238 -81 7 0
-212 39 -99 0
110 -56 -158 0
-245 22 239 0
33 226 125 0
228 -75 -178 0
-123 17 -143 0
-238 -98 -247 0
-104 9 -204 0
46 206 -112 0
-94 67 -113 0
-166 -37 169 0
217 182 -105 0
-220 -121 130 0
201 59 2 0
-1 -196 -92 0
152 -62 -237 0
-244 197 106 0
-110 84 17 0
175 -25 200 0
32 -141 222 0
-250 -171 -64 0
53 -14 -238 0
-172 223 -110 0
-3 133 43 0
161 9 42 0
-81 -197 176 0
177 73 -212 0
-66 -29 -75 0
195 -119 66 0
-144 90 108 0
131 173 -33 0
220 168 43 0
-115 -126 -5 0
-102 -3 175 0
120 -178 34 0
-11 -112 -174 0
112 -70 42 0
9 247 234 0
245 126 -219 0
-170 -79 218 0
193 -84 -23 0
73 -112 -183 0
-250 19 -54 0
53 -24 -35 0
-233 -181 96 0
-145 -230 176 0
160 -170 -49 0
-72 22 -132 0
113 132 172 0
66 -41 -102 0
-58 130 78 0
243 219 202 0
54 224 -32 0
156 240 -161 0
153 98 -111 0
-5 224 100 0
-203 -127 -96 0
-16 60 239 0
167 144 -72 0
-249 -243 155 0
-230 135 159 0
-91 88 -112 0
14 239 -229 0
-205 70 126 0
230 -234 -39 0
111 -98 151 0
62 -217 -94 0
-62 18 -185 0
222 -178 -20 0
-60 -177 -240 0
-137 121 -233 0
144 43 112 0
215 86 76 0
-102 -103 -244 0
-184 56 249 0
143 223 193 0
118 183 -152 0
202 -1 157 0
-172 -94 114 0
-85 87 -84 0
207 -140 -212 0
-196 -62 23 0
-61 14 -25 0
-139 112 19 0
-131 -173 -29 0
-218 113 -102 0
-183 212 6 0
22 -42 -167 0
-157 -63 179 0
-153 115 -28 0
43 115 -159 0
-170 171 -193 0
-207 12 -177 0
82 -92 74 0
-224 152 109 0
207 -99 205 0
94 -248 142 0
-209 92 -65 0
-121 -54 -171 0
-66 -58 202 0
199 -226 136 0
232 25 57 0
45 102 52 0
41 -192 105 0
187 198 -235 0
-10 140 131 0
-187 229 25 0
55 -26 101 0
-14 239 41 0
124 -219 -142 0
-94 -182 -169 0
183 232 89 0
-43 233 -157 0
-89 -61 -11 0
-100 74 -247 0
-58 25 238 0
99 -76 -80 0
-94 -2 68 0
89 112 44 0
-50 -243 90 0
113 -246 247 0
135 -142 65 0
140 230 24 0
-138 96 242 0
30 171 138 0
-88 36 -39 0
-43 -142 89 0
-207 -239 -183 0
81 243 -74 0
171 -13 176 0
188 123 176 0
-67 -96 -56 0
-107 -15 52 0
54 -53 -15 0
182 -132 -230 0
-245 -168 -91 0
-112 45 165 0
-174 21 -122 0
-2 119 -224 0
-70 -240 -4 0
-211 226 -179 0
219 -137 -189 0
-139 68 -223 0
74 -235 210 0
-205 -36 94 0
-17 93 -141 0
39 -62 38 0
210 180 -28 0
-132 41 63 0
139 -98 -34 0
189 243 -124 0
100 86 213 0
-197 25 35 0
-47 222 127 0
-202 -115 125 0
221 241 -154 0
121 -187 -42 0
3 212 61 0
-234 -160 -217 0
44 144 -121 0
133 -233 -164 0
-210 147 -114 0
-175 65 116 0
104 106 214 0
-195 107 -144 0
-120 212 76 0
196 99 225 0
199 -230 -120 0
107 243 -181 0
-136 -157 -36 0
-146 18 137 0
-76 -143 -66 0
-183 -97 -227 0
-28 123 204 0
-219 217 -167 0
190 4 167 0
107 -76 -139 0
-88 -242 215 0
-4 -118 -65 0
-204 -177 -166 0
-244 -81 68 0
-114 -40 87 0
-210 125 168 0
127 -11 -97 0
211 171 -48 0
-199 -92 -57 0
-118 -26 -52 0
-26 -197 -161 0
-81 -154 -99 0
-120 -10 115 0
-165 128 170 0
153 -187 110 0
-243 36 -209 0
-27 -80 190 0
-183 -167 196 0
163 -128 196 0
126 31 128 0
-36 224 188 0
70 28 200 0
22 209 -230 0
-39 135 222 0
172 220 -215 0
-89 -135 230 0
-21 -159 -66 0
-245 149 175 0
-113 -114 -38 0
41 222 -191 0
163 -82 -198 0
107 -142 122 0
-36 220 -166 0
-85 -198 -36 0
-192 -248 -207 0
-54 133 36 0
-71 201 8 0
-36 102 83 0
-226 -38 187 0
228 -103 -222 0
126 85 -86 0
-63 88 48 0
2 -67 160 0
223 65 215 0
-51 -153 67 0
68 198 147 0
235 4 152 0
-8 180 -190 0
-195 11 203 0
30 -32 85 0
248 -37 -224 0
144 73 -114 0
114 -197 -74 0
-109 -222 65 0
-94 -121 61 0
146 -157 -77 0
-195 -88 165 0
17 238 -61 0
-40 132 53 0
25 -155 71 0
199 -247 -103 0
81 18 -49 0
60 -152 -228 0
157 3 -208 0
-122 41 -239 0
197 -163 209 0
69 209 71 0
-246 -205 -190 0
126 189 57 0
-232 -190 -217 0
238 102 69 0
225 -52 220 0
238 -113 -45 0
-87 134 249 0
-120 175 -240 0
-109 -67 25 0
22 139 173 0
184 -31 -190 0
203 193 199 0
-205 -60 106 0
120 247 162 0
-120 244 -135 0
-193 -5 -200 0
91 93 -31 0
128 -210 -194 0
167 -143 -101 0
115 -184 233 0
-221 245 120 0
73 122 105 0
-80 204 178 0
160 -85 -152 0
-185 -245 122 0
-111 -209 -139 0
-122 147 -102 0
177 -26 164 0
137 -230 -162 0
-33 -109 208 0
215 208 -152 0
-103 -1 -13 0
-160 -89 216 0
-215 118 -145 0
-203 65 -55 0
-248 -144 -27 0
-91 65 216 0
92 110 48 0
-174 -89 3 0
10 -38 -31 0
-96 223 113 0
-108 160 -131 0
-38 15 -174 0
-210 190 -172 0
220 -111 -11 0
200 -175 -243 0
182 45 134 0
58 -71 67 0
-4 -134 104 0
-47 178 195 0
204 71 -124 0
122 -132 -181 0
-224 125 -148 0
192 -117 -53 0
235 -39 -95 0
-17 -24 -174 0
221 151 80 0
-17 182 8 0
132 75 -61 0
237 170 -9 0
-222 -52 -92 0
243 144 -238 0
-162 107 -112 0
-93 -127 14 0
-229 -75 -204 0
-230 -191 64 0
19 13 -85 0
-4 -81 119 0
74 182 13 0
-225 237 212 0
207 -187 -201 0
-28 1 200 0
103 -249 -218 0
67 -167 190 0
61 -34 24 0
-43 54 -220 0
-246 -17 156 0
-166 240 242 0
27 31 -170 0
250 -141 -92 0
214 -12 106 0
165 217 245 0
-52 -128 116 0
164 -33 102 0
68 -122 -43 0
198 164 15 0
-244 139 108 0
130 -113 -102 0
-205 -175 -62 0
13 239 -30 0
233 42 -8 0
35 -76 241 0
-223 27 -201 0
184 31 -75 0
-180 -145 -49 0
-85 12 11 0
-170 -157 -243 0
-99 -214 89 0
230 -131 -248 0
-129 -243 -178 0
232 219 80 0
96 190 -22 0
65 -201 -236 0
225 -16 24 0
-212 -133 -242 0
185 25 -82 0
18 -194 -234 0
-94 -139 51 0
-208 171 -191 0
-33 49 -99 0
230 -91 -3 0
215 50 -165 0
-219 -11 -167 0
-19 37 189 0
143 -191 -141 0
151 -248 57 0
-208 55 -133 0
24 -155 -165 0
-14 -9 18 0
-152 149 -1 0
-77 98 71 0
131 -31 129 0
92 -39 -218 0
203 166 45 0
-158 -122 -131 0
-93 -249 239 0
99 77 -54 0
-105 41 216 0
-200 -210 101 0
76 -150 238 0
8 -214 60 0
145 -135 -226 0
-170 -49 -120 0
-236 117 -69 0
173 245 98 0
-31 32 116 0
151 103 56 0
-175 58 -36 0
-197 -109 -166 0
106 -8 -97 0
35 -70 -210 0
-236 -71 -105 0
-60 -110 237 0
248 214 -126 0
-49 -17 176 0
17 -160 219 0
155 58 -60 0
159 7 212 0
242 -195 250 0
120 99 75 0
93 1 -226 0
-213 49 -195 0
20 -55 -99 0
-82 20 92 0
59 -28 177 0
-165 187 181 0
-218 -211 -208 0
78 -105 -28 0
-41 113 222 0
-16 18 157 0
-118 -111 -60 0
51 -95 -151 0
-217 219 -190 0
-172 237 158 0
-224 -43 34 0
240 -231 58 0
100 31 -114 0
-145 100 201 0
35 238 -237 0
247 191 147 0
-241 53 -98 0
-52 -26 79 0
-246 58 -183 0
196 234 -72 0
230 199 -208 0
167 8 180 0
227 -1 -125 0
-6 102 142 0
78 -243 -77 0
178 135 174 0
42 30 171 0
-66 23 224 0
-168 -146 155 0
-135 22 -160 0
82 68 -18 0
169 -179 -127 0
78 -244 -80 0
23 -192 -51 0
161 -23 -114 0
-236 223 -133 0
21 47 237 0
93 -150 155 0
125 -44 91 0
131 25 -162 0
-245 180 -127 0
186 229 -191 0
241 244 -105 0
32 -78 -147 0
"""
output = "SAT"
|
Yarrick13/hwasp
|
tests/sat/Intensive/c1082.250.SAT.dimacs.test.py
|
Python
|
apache-2.0
| 15,546
|
# Make sure you name your file with className.py
from hint_class_helpers.find_matches import find_matches
class Prob6_Part3:
"""
Author: Shen Ting Ang
Date: 11/7/2016
"""
def check_attempt(self, params):
self.attempt = params['attempt'] #student's attempt
self.answer = params['answer'] #solution
self.att_tree = params['att_tree'] #attempt tree
self.ans_tree = params['ans_tree'] #solution tree
matches = find_matches(params)
matching_node = [m[0] for m in matches]
try:
hint = 'The marginal distribution of X is the sum of all Y values.'
return hint + ' If P(Y=1,X=2) = P(Y=2,X=2) = P(Y=3,X=2) = 0.1, what is P(X=2)?', '0.3'
except Exception:
return '',''
def get_problems(self):
self.problem_list = ["ExpectationVariance/cov_dep_uncor.imd"]
return self.problem_list
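# A worked version of the hint above (illustrative only; `joint` is a
# hypothetical name, not part of the hint framework): the marginal P(X=2) is
# the sum of the joint probabilities P(Y=y, X=2) over all values of Y.
if __name__ == '__main__':
    joint = {(1, 2): 0.1, (2, 2): 0.1, (3, 2): 0.1}  # (y, x) -> P(Y=y, X=x)
    marginal_x2 = sum(p for (y, x), p in joint.items() if x == 2)
    print(round(marginal_x2, 10))  # 0.3, rounding away float representation error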
|
zhenzhai/edx-platform
|
common/lib/sandbox-packages/hint/hint_class/Week7/Prob6_Part3.py
|
Python
|
agpl-3.0
| 911
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from HTMLParser import HTMLParser
from htmlentitydefs import entitydefs
NON_BREAKING_SPACE = u'\xA0'
class HtmlReader(HTMLParser):
IGNORE = 0
INITIAL = 1
PROCESS = 2
def __init__(self):
HTMLParser.__init__(self)
self._encoding = 'ISO-8859-1'
self._handlers = {'table_start' : self.table_start,
'table_end' : self.table_end,
'tr_start' : self.tr_start,
'tr_end' : self.tr_end,
'td_start' : self.td_start,
'td_end' : self.td_end,
'th_start' : self.td_start,
'th_end' : self.td_end,
'br_start' : self.br_start,
'meta_start' : self.meta_start}
def read(self, htmlfile, populator):
self.populator = populator
self.state = self.IGNORE
self.current_row = None
self.current_cell = None
for line in htmlfile.readlines():
self.feed(self._decode(line))
# Calling close() is required by HTMLParser but may cause problems
# if the same instance of our HtmlReader is reused. Currently it's
# used only once so there's no problem.
self.close()
self.populator.eof()
def _decode(self, line):
return line.decode(self._encoding)
def handle_starttag(self, tag, attrs):
handler = self._handlers.get(tag+'_start')
if handler is not None:
handler(attrs)
def handle_endtag(self, tag):
handler = self._handlers.get(tag+'_end')
if handler is not None:
handler()
def handle_data(self, data):
if self.state == self.IGNORE or self.current_cell is None:
return
if NON_BREAKING_SPACE in data:
data = data.replace(NON_BREAKING_SPACE, ' ')
self.current_cell.append(data)
def handle_entityref(self, name):
value = self._handle_entityref(name)
self.handle_data(value)
def _handle_entityref(self, name):
if name == 'apos': # missing from entitydefs
return "'"
try:
value = entitydefs[name]
except KeyError:
return '&'+name+';'
if value.startswith('&#'):
return unichr(int(value[2:-1]))
return value.decode('ISO-8859-1')
def handle_charref(self, number):
value = self._handle_charref(number)
self.handle_data(value)
def _handle_charref(self, number):
if number.startswith(('x', 'X')):
base = 16
number = number[1:]
else:
base = 10
try:
return unichr(int(number, base))
except ValueError:
return '&#'+number+';'
def unknown_decl(self, data):
# Ignore everything even if it's invalid. This kind of stuff comes
# at least from MS Excel
pass
def table_start(self, attrs=None):
self.state = self.INITIAL
self.current_row = None
self.current_cell = None
def table_end(self):
if self.current_row is not None:
self.tr_end()
self.state = self.IGNORE
def tr_start(self, attrs=None):
if self.current_row is not None:
self.tr_end()
self.current_row = []
def tr_end(self):
if self.current_row is None:
return
if self.current_cell is not None:
self.td_end()
if self.state == self.INITIAL:
accepted = self.populator.start_table(self.current_row)
self.state = self.PROCESS if accepted else self.IGNORE
elif self.state == self.PROCESS:
self.populator.add(self.current_row)
self.current_row = None
def td_start(self, attrs=None):
if self.current_cell is not None:
self.td_end()
if self.current_row is None:
self.tr_start()
self.current_cell = []
def td_end(self):
if self.current_cell is not None and self.state != self.IGNORE:
cell = ''.join(self.current_cell)
self.current_row.append(cell)
self.current_cell = None
def br_start(self, attrs=None):
self.handle_data('\n')
def meta_start(self, attrs):
encoding = self._get_encoding_from_meta(attrs)
if encoding:
self._encoding = encoding
def _get_encoding_from_meta(self, attrs):
valid_http_equiv = False
encoding = None
for name, value in attrs:
name = name.lower()
if name == 'charset': # html5
return value
if name == 'http-equiv' and value.lower() == 'content-type':
valid_http_equiv = True
if name == 'content':
encoding = self._get_encoding_from_content_attr(value)
return encoding if valid_http_equiv else None
def _get_encoding_from_content_attr(self, value):
for token in value.split(';'):
token = token.strip()
if token.lower().startswith('charset='):
return token[8:]
def handle_pi(self, data):
encoding = self._get_encoding_from_pi(data)
if encoding:
self._encoding = encoding
def _get_encoding_from_pi(self, data):
data = data.strip()
if not data.lower().startswith('xml '):
return None
if data.endswith('?'):
data = data[:-1]
for token in data.split():
if token.lower().startswith('encoding='):
encoding = token[9:]
if encoding.startswith("'") or encoding.startswith('"'):
encoding = encoding[1:-1]
return encoding
return None
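# A minimal usage sketch (illustrative; RowCollector is a hypothetical
# populator, not part of Robot Framework). HtmlReader.read() drives any
# populator exposing start_table(row) -> bool, add(row) and eof().
if __name__ == '__main__':
    from StringIO import StringIO

    class RowCollector(object):
        def __init__(self):
            self.rows = []

        def start_table(self, row):
            self.rows.append(row)
            return True  # accept every table

        def add(self, row):
            self.rows.append(row)

        def eof(self):
            pass

    html = '<table><tr><th>A</th><th>B</th></tr><tr><td>1</td><td>2</td></tr></table>'
    collector = RowCollector()
    HtmlReader().read(StringIO(html), collector)
    print(collector.rows)  # [[u'A', u'B'], [u'1', u'2']]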
|
eric-stanley/robotframework
|
src/robot/parsing/htmlreader.py
|
Python
|
apache-2.0
| 6,458
|
# -*- coding: utf-8 -*-
# @Author: ZwEin
# @Date: 2016-06-09 11:10:02
# @Last Modified by: ZwEin
# @Last Modified time: 2016-06-16 09:10:04
|
ZwEin27/phone-number-matcher
|
pnmatcher/core/__init__.py
|
Python
|
apache-2.0
| 144
|
from apiclient import APIClient, RateLimiter
class FreebaseAPI(APIClient):
API_FILE = '.freebase_api_key'
API_KEY = open(API_FILE).read()
BASE_URL = 'https://www.googleapis.com/freebase/v1/'
def search(self,params):
params['apikey'] = self.API_KEY
path = 'search'
print params
return self.call(path, **params)['result']
def actor(self,name):
return self.search({
'filter':'(all name:"'+name+'" type:/film/actor)'
})[0] # assume the first hit is the actor we want
def all_actors(self):
return self.search({
'filter':'(all type:/film/actor)',
'start':200,'limit':200
})
def movies(self,actor_name):
return self.search({
'filter':'(all type:/film/film contributor:"'+actor_name+'")',
'limit':'200'
})
if __name__ == '__main__':
lock = RateLimiter(max_messages=10, every_seconds=60)
fb = FreebaseAPI(rate_limit_lock=lock)
actor = fb.all_actors()[0]
print actor['name']
for movie in fb.movies(actor['name']):
print movie['name']
|
andynu/RottenTomatoesActorQuality
|
src/freebase.py
|
Python
|
gpl-2.0
| 1,013
|
import numpy as np
import scipy.sparse
from scipy.spatial.distance import cdist
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.csgraph import maximum_bipartite_matching,\
min_weight_full_bipartite_matching
class MaximumBipartiteMatching(Benchmark):
params = [[5000, 7500, 10000], [0.0001, 0.0005, 0.001]]
param_names = ['n', 'density']
def setup(self, n, density):
# Create random sparse matrices. Note that we could use
# scipy.sparse.rand for this purpose, but simply using np.random and
# disregarding duplicates is quite a bit faster.
np.random.seed(42)
d = np.random.randint(0, n, size=(int(n*n*density), 2))
graph = scipy.sparse.csr_matrix((np.ones(len(d)), (d[:, 0], d[:, 1])),
shape=(n, n))
self.graph = graph
def time_maximum_bipartite_matching(self, n, density):
maximum_bipartite_matching(self.graph)
# For benchmarking min_weight_full_bipartite_matching, we rely on some of
# the classes defined in Burkard, Dell'Amico, Martello -- Assignment Problems,
# 2009, Section 4.10.1.
def random_uniform(shape):
return scipy.sparse.csr_matrix(np.random.uniform(1, 100, shape))
def random_uniform_sparse(shape):
return scipy.sparse.random(shape[0], shape[1], density=0.1, format='csr')
def random_uniform_integer(shape):
return scipy.sparse.csr_matrix(np.random.randint(1, 1000, shape))
def random_geometric(shape):
P = np.random.randint(1, 1000, size=(shape[0], 2))
Q = np.random.randint(1, 1000, size=(shape[1], 2))
return scipy.sparse.csr_matrix(cdist(P, Q, 'sqeuclidean'))
def random_two_cost(shape):
return scipy.sparse.csr_matrix(np.random.choice((1, 1000000), shape))
def machol_wien(shape):
# Machol--Wien instances being harder than the other examples, we cut
# down the size of the instance by 5.
return scipy.sparse.csr_matrix(
np.outer(np.arange(shape[0]//5) + 1, np.arange(shape[1]//5) + 1))
class MinWeightFullBipartiteMatching(Benchmark):
sizes = range(100, 401, 100)
param_names = ['shapes', 'input_type']
params = [
[(i, i) for i in sizes] + [(i, 2 * i) for i in sizes],
['random_uniform', 'random_uniform_sparse', 'random_uniform_integer',
'random_geometric', 'random_two_cost', 'machol_wien']
]
def setup(self, shape, input_type):
np.random.seed(42)
input_func = {'random_uniform': random_uniform,
'random_uniform_sparse': random_uniform_sparse,
'random_uniform_integer': random_uniform_integer,
'random_geometric': random_geometric,
'random_two_cost': random_two_cost,
'machol_wien': machol_wien}[input_type]
self.biadjacency_matrix = input_func(shape)
def time_evaluation(self, *args):
min_weight_full_bipartite_matching(self.biadjacency_matrix)
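# A small usage sketch (illustrative, not part of the benchmark suite),
# assuming the scipy.sparse.csgraph API imported above.
if __name__ == '__main__':
    graph = scipy.sparse.csr_matrix(np.array([[0, 1, 0],
                                              [1, 0, 0],
                                              [0, 0, 1]]))
    # With perm_type='column': the matched column for each row (-1 if unmatched).
    print(maximum_bipartite_matching(graph, perm_type='column'))
    costs = scipy.sparse.csr_matrix(np.array([[2.0, 1.0],
                                              [1.0, 3.0]]))
    # Row and column indices of a minimum-weight full matching.
    print(min_weight_full_bipartite_matching(costs))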
|
WarrenWeckesser/scipy
|
benchmarks/benchmarks/sparse_csgraph_matching.py
|
Python
|
bsd-3-clause
| 3,000
|
from core.vectors import PhpCode, ShellCmd, ModuleExec, Os
from core.module import Module
from core import modules
from core import messages
from core.loggers import log
import urllib.parse
import os
class Upload2web(Module):
"""Upload file automatically to a web folder and get corresponding URL."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_arguments([
{ 'name' : 'lpath', 'help' : 'Local file path. Set remote file name when used with -content.' },
{ 'name' : 'rpath', 'help' : 'Remote path. If it is a folder, find the first writable folder in it', 'default' : '.', 'nargs' : '?' },
{ 'name' : '-content', 'help' : 'Optionally specify the file content'},
{ 'name' : '-simulate', 'help' : 'Just return the positions without uploading any content', 'action' : 'store_true', 'default' : False },
])
def _get_env_info(self, script_url):
script_folder = ModuleExec('system_info', [ '-info', 'script_folder' ]).load_result_or_run('script_folder')
if not script_folder: return
script_url_splitted = urllib.parse.urlsplit(script_url)
script_url_path_folder, script_url_path_filename = os.path.split(
script_url_splitted.path)
url_folder_pieces = script_url_path_folder.split(os.sep)
folder_pieces = script_folder.split(os.sep)
for pieceurl, piecefolder in zip(reversed(url_folder_pieces), reversed(folder_pieces)):
if pieceurl == piecefolder:
folder_pieces.pop()
url_folder_pieces.pop()
else:
break
base_url_path_folder = os.sep.join(url_folder_pieces)
self.base_folder_url = urllib.parse.urlunsplit(
script_url_splitted[:2] + (base_url_path_folder, ) + script_url_splitted[3:])
self.base_folder_path = os.sep.join(folder_pieces)
def _map_folder2web(self, relative_path_folder='.'):
absolute_path = ModuleExec('file_check', [ relative_path_folder, 'abspath' ]).run()
if not absolute_path:
log.warn(messages.module_file_upload2web.failed_resolve_path)
return None, None
if not absolute_path.startswith(self.base_folder_path.rstrip('/')):
log.warn(messages.module_file_upload2web.error_s_not_under_webroot_s % (
absolute_path,
self.base_folder_path.rstrip('/'))
)
return None, None
relative_to_webroot_path = absolute_path.replace(
self.base_folder_path,
''
)
url_folder = '%s/%s' % (self.base_folder_url.rstrip('/'),
relative_to_webroot_path.lstrip('/'))
return absolute_path, url_folder
def _map_file2web(self, relative_path_file):
relative_path_folder, filename = os.path.split(relative_path_file)
if not relative_path_folder:
relative_path_folder = './'
absolute_path_folder, url_folder = self._map_folder2web(
relative_path_folder)
if not absolute_path_folder or not url_folder:
return None, None
absolute_path_file = os.path.join(absolute_path_folder, filename)
url_file = os.path.join(url_folder, filename)
return absolute_path_file, url_file
def run(self):
file_upload_args = [ self.args['rpath'] ]
content = self.args.get('content')
lpath = self.args.get('lpath')
self._get_env_info(self.session['url'])
if not self.base_folder_url or not self.base_folder_path:
log.warn(messages.module_file_upload2web.failed_retrieve_info)
# If remote path is a folder, get first writable folder
if ModuleExec("file_check", [ self.args['rpath'], 'dir' ]).run():
folders = ModuleExec("file_find", [ '-writable', '-quit', self.args['rpath'] ]).run()
if not folders or not folders[0]:
log.warn(messages.module_file_upload2web.failed_search_writable_starting_s % self.args['rpath'])
return None, None
# Get remote file name from lpath
lfolder, rname = os.path.split(lpath)
# TODO: all the paths should be joined with remote OS_SEP from system_info.
self.args['rpath'] = os.path.join(folders[0], rname)
file_upload_args = [ lpath, self.args['rpath'] ]
if content:
file_upload_args += [ '-content', content ]
if self.args.get('simulate') or ModuleExec("file_upload", file_upload_args).run():
# Guess URL from rpath
return [ self._map_file2web(self.args['rpath']) ]
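# A standalone sketch (illustrative only; map_path_to_url is a hypothetical
# helper, not part of weevely) of the prefix-swapping idea behind
# _map_folder2web: a path under the web root maps to a URL by replacing the
# web root folder with the base folder URL.
def map_path_to_url(absolute_path, base_folder_path, base_folder_url):
    if not absolute_path.startswith(base_folder_path.rstrip('/')):
        return None  # the path is not exposed by the web server
    relative = absolute_path[len(base_folder_path):]
    return '%s/%s' % (base_folder_url.rstrip('/'), relative.lstrip('/'))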
|
epinna/weevely3
|
modules/file/upload2web.py
|
Python
|
gpl-3.0
| 4,967
|
"""
Demo platform that has two fake remotes.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.remote import RemoteDevice
from homeassistant.const import DEVICE_DEFAULT_NAME
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the demo remotes."""
add_devices_callback([
DemoRemote('Remote One', False, None),
DemoRemote('Remote Two', True, 'mdi:remote'),
])
class DemoRemote(RemoteDevice):
"""Representation of a demo remote."""
def __init__(self, name, state, icon):
"""Initialize the Demo Remote."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._icon = icon
@property
def should_poll(self):
"""No polling needed for a demo remote."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def is_on(self):
"""Return true if remote is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the remote on."""
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the remote off."""
self._state = False
self.schedule_update_ha_state()
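# A minimal usage sketch (illustrative only; `collected` is a hypothetical
# name, not part of Home Assistant). setup_platform hands the devices to the
# callback as a list; only read-only properties are touched here, because
# turn_on/turn_off call schedule_update_ha_state(), which needs a live hass.
if __name__ == '__main__':
    collected = []
    setup_platform(None, {}, collected.extend)
    for remote in collected:
        print(remote.name, remote.is_on)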
|
xifle/home-assistant
|
homeassistant/components/remote/demo.py
|
Python
|
mit
| 1,545
|
from __future__ import print_function
from BinPy import *
print ('Usage of IC 7425:\n')
ic = IC_7425()
print ('\nThe Pin configuration is:\n')
p = {
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
7: 0,
9: 1,
10: 1,
11: 1,
12: 1,
13: 1,
14: 1}
print (p)
print ('\nPin initialization -using -- ic.setIC(p) --\n')
ic.setIC(p)
print ('\nPowering up the IC - using -- ic.setIC({14:1,7:0}) -- \n')
ic.setIC({14: 1, 7: 0})
print ('\nDraw the IC with the current configuration\n')
ic.drawIC()
print (
'\nRun the IC with the current configuration using -- print ic.run() -- \n')
print (
'Note that the ic.run() returns a dict of pin configuration similar to :')
print (ic.run())
print (
'\nSetting the outputs to the current IC configuration using -- ic.setIC(ic.run()) --\n')
ic.setIC(ic.run())
print ('\nDraw the final configuration\n')
ic.drawIC()
print ('\nConnector Inputs\n')
print ('c = Connector(p[1])\np[1] = c\nic.setIC(p)\n')
c = Connector(p[1])
p[1] = c
ic.setIC(p)
print ('Run the IC\n')
print (ic.run())
print ('\nConnector Outputs')
print ('Set the output -- ic.setOutput(8, c)\n')
ic.setOutput(8, c)
print ('Run the IC\n')
print (ic.run())
|
coder006/BinPy
|
BinPy/examples/ic/Series_7400/IC7425.py
|
Python
|
bsd-3-clause
| 1,189
|
# OCR requires parameter tuning based on the image pattern.
# This project demonstrates how to apply image pre-processing and then
# use the tesseract package to recognize a captcha.
# The proposed method achieves up to 80% accuracy, which is acceptable for many web applications.
# Feel free to send me any feedback: allan920693@yahoo.com.tw
# Yu-Jia Chen 02:01 2016/8/17
import pytesseract
from PIL import Image, ImageEnhance, ImageFilter
import numpy
from collections import Counter
import floodFill
import cut_min_x_axis
import concat_images
def AllFiguretoFillInit(img):
TempFiguretoFill = set()
xsize, ysize = img.size
for i in range(xsize-1):
for j in range(ysize-1):
TempFiguretoFill.add((i,j))
return TempFiguretoFill
########## Assign Photo to Solve and Initialize Parameters ##########
path = 'test.jpg'
target_color = "white"
image = Image.open(path)
xsize, ysize = image.size
scale = max(xsize, ysize)
smallscale = min(xsize, ysize)
answer_num = 5 # expected number of output characters
pixelCount_threshold_init = int(0.0025*xsize*ysize) # expected area of noise
pixelCount_threshold_varaition_number = 5
pixelCount_threshold_varaition_ratio = 0.2
############### Initial Photo Array ############
global AllFiguretoFill
AllFiguretoFill = set()
for i in range(xsize-1):
for j in range(ysize-1):
AllFiguretoFill.add((i,j))
#print AllFiguretoFill
############### Plain Recognition (no pre-processing) ############
text1 = pytesseract.image_to_string(image,config='-psm 9') #-psm 9 = Treat the image as a single word in a circle.
print 'Step1 Result:'
print '\n'
print text1
print '\n'
############### Function 1: Binary Map ############
step2_img = image
step2_img = step2_img.convert('RGBA')
pix = step2_img.load()
for y in range(step2_img.size[1]):
for x in range(step2_img.size[0]):
if pix[x, y][0] < 102 or pix[x, y][1] < 102 or pix[x, y][2] < 102:
pix[x, y] = (0, 0, 0, 255)
else:
pix[x, y] = (255, 255, 255, 255)
step2_img.save('step2_img.jpg')
text2 = pytesseract.image_to_string(Image.open('step2_img.jpg'),config='-psm 9') #-psm 9 = Treat the image as a single word in a circle.
print 'Step2 Result:'
print '\n'
print text2
print '\n'
############### Function 2: bright and contrast filter ############
step3_img = Image.open('step2_img.jpg')
step3_img = step3_img.filter(ImageFilter.MedianFilter())
enhancer = ImageEnhance.Contrast(step3_img)
step3_img = enhancer.enhance(10)
enhancer = ImageEnhance.Brightness(step3_img)
step3_img = enhancer.enhance(10)
step3_img = step3_img.convert('1')
step3_img.save('step3_img.jpg')
text3 = pytesseract.image_to_string(Image.open('step3_img.jpg'),config='-psm 9') #-psm 9 = Treat the image as a single word in a circle.
print 'Step 3 Result:'
print '\n'
print text3
print '\n'
############### Function 3 : Erase small pixel array by sliding window ############
step4_img = Image.open('step3_img.jpg')
step4_img = step4_img.convert('RGB')
x_w_scale, y_w_scale = step4_img.size
tilted_window_scale =int(max(x_w_scale, y_w_scale)*0.037)
pix = step4_img.load()
window_size_x = int(x_w_scale*0.027)# ~2.7% of the width
window_size_y = int(max(x_w_scale, y_w_scale)*0.027)# ~2.7% of the larger dimension
if target_color == "black":
rgb_num=255
else:
rgb_num=0
for x in range(step4_img.size[0]): # cut vertical noise
window_index = 0
for window_index in range (step4_img.size[1]-window_size_y+1):
if pix[x,window_index][0]==rgb_num and pix[x,window_index+window_size_y-1][0]==rgb_num:
for i in range(window_size_y):
pix[x,window_index+i] = (rgb_num, rgb_num, rgb_num)
for y in range(step4_img.size[1]): # cut horizontal noise
window_index = 0
for window_index in range (step4_img.size[0]-window_size_x+1):
if pix[window_index,y][0]==rgb_num and pix[window_index+window_size_x-1,y][0]==rgb_num:
for i in range(window_size_x):
pix[window_index+i,y] = (rgb_num, rgb_num, rgb_num)
for x in range(step4_img.size[0]): # left-to-right tilted line cutting
window_index = 0
while (x+tilted_window_scale) < step4_img.size[0] and (window_index+tilted_window_scale) < step4_img.size[1]:
if pix[x,window_index][0]==rgb_num and pix[x+tilted_window_scale,window_index+tilted_window_scale][0]==rgb_num:
for i in range (tilted_window_scale):
pix[x+i,window_index+i] = (rgb_num, rgb_num, rgb_num)
x+=1
window_index+=1
for x in range(step4_img.size[0],0,-1): # left-to-right tilted line cutting (reverse scan)
window_index = y_w_scale
while (x-tilted_window_scale) > 0 and (window_index-tilted_window_scale) > 0:
if pix[x-1,window_index-1][0]==rgb_num and pix[x-tilted_window_scale,window_index-tilted_window_scale][0]==rgb_num:
for i in range (tilted_window_scale):
pix[x-i-1,window_index-i-1] = (rgb_num, rgb_num, rgb_num)
x=x-1
window_index=window_index-1
for y in range(step4_img.size[1]): # Right-to-left tilted line cutting
window_index = x_w_scale
while (window_index-tilted_window_scale) > 0 and (y+tilted_window_scale) < step4_img.size[1]:
if pix[window_index-1,y][0]==rgb_num and pix[window_index-tilted_window_scale,y+tilted_window_scale][0]==rgb_num:
for i in range (tilted_window_scale):
pix[window_index-i-1,y+i] = (rgb_num, rgb_num, rgb_num)
y+=1
window_index=window_index-1
for y in range(step4_img.size[1],0,-1): # Right-to-left tilted line cutting
window_index = 0
while (window_index+tilted_window_scale) < step4_img.size[0] and (y-tilted_window_scale) > 0:
if pix[window_index,y-1][0]==rgb_num and pix[window_index+tilted_window_scale,y-tilted_window_scale][0]==rgb_num:
for i in range (tilted_window_scale):
pix[window_index+i,y-i-1] = (rgb_num, rgb_num, rgb_num)
y=y-1
window_index=window_index+1
step4_img.save('step4_img.jpg')
############### Cut small areas & Extract target areas & Vary noise-area threshold & Recognize with pytesseract ############
S_pixel = numpy.arange(1,1+pixelCount_threshold_varaition_ratio*pixelCount_threshold_varaition_number,pixelCount_threshold_varaition_ratio)
pixelCount_threshold_array = numpy.multiply(S_pixel,pixelCount_threshold_init )
text_final_mode5_min_x = []
text_final_mode6_min_x = []
text_final_mode7_min_x = []
text_final_mode8_min_x = []
text_final_mode9_min_x = []
text_final_min_x = []
text_final_mode5_max_x = []
text_final_mode6_max_x = []
text_final_mode7_max_x = []
text_final_mode8_max_x = []
text_final_mode9_max_x = []
text_final_max_x = []
new_text_final= [""for k in range(answer_num)]
target_count_array = []
Possible_target_area_array = []
Possible_target_area_array_combine = []
final_answer = [""for k in range(answer_num)]
Uncut_final_answer = [""for k in range(answer_num)]
target_x = []
min_x_reorderd_cutting_area_scale = [] # for cutting the appropriate sub-figure
max_x_reorderd_cutting_area_scale = [] # for cutting the appropriate sub-figure
min_x_reorderd_cutting_area_array = []
max_x_reorderd_cutting_area_array = []
all_img_dir_min_x = []
all_img_dir_max_x = []
sort_count_array = [ 0 for k in range(answer_num)]
img_min_x = [""for k in range(answer_num)]
img_max_x = [""for k in range(answer_num)]
if target_color == "black":
target_rgb_num=0
rgb_num=255
else:
target_rgb_num=255
rgb_num=0
path = 'step4_img.jpg'
image = Image.open(path)
for pixelCount_threshold in pixelCount_threshold_array:
AllFiguretoFill = AllFiguretoFillInit(image)
while (AllFiguretoFill):
(x,y) = AllFiguretoFill.pop()
newimage, Possible_target_area, areaCount, AllFiguretoFill = floodFill.floodFill(x,y,target_rgb_num,target_rgb_num,target_rgb_num,rgb_num,rgb_num,rgb_num,image,pixelCount_threshold,AllFiguretoFill) # Extract black color (0,0,0) and if the area is small: color white (255,255,255)
image = newimage.copy()
Possible_target_area_array_combine.append([areaCount,Possible_target_area])
if areaCount >= sort_count_array[0]:
del(sort_count_array[0])
sort_count_array.append(areaCount)
sort_count_array = sorted(sort_count_array)
Possible_target_area_array_combine = sorted(Possible_target_area_array_combine)
seleted_list_target_area_array = Possible_target_area_array_combine[-answer_num:]
Copy_list_target_area_array = seleted_list_target_area_array[:]
for index in range(answer_num): # Find the min x axis and max x axis of each possible target area
tempset = set()
new_temp_set = set()
shift_temp_set = set()
reshift_temp_set = set()
tempset = Copy_list_target_area_array[index][1].copy()
a_min_x = min(tempset)[0]
a_max_x = max(tempset)[0]
a_min_y = 0
a_max_y = 0
new_temp_set = tempset.copy()
while (new_temp_set):
(x,y) = new_temp_set.pop()
if y > a_max_y: a_max_y = y
if y < a_min_y: a_min_y = y
shift_temp_set = tempset.copy()
while (shift_temp_set): # reshift the area
(x,y) = shift_temp_set.pop()
reshift_temp_set.add((x-a_min_x+1,y-a_min_y+1))
target_x.append([[a_min_x,a_max_x],reshift_temp_set,[a_max_x-a_min_x,a_max_y-a_min_y]])
sorted_min_x_list = sorted(range(len(target_x)), key=lambda i: target_x[i][0][0])[-answer_num:]
sorted_max_x_list = sorted(range(len(target_x)), key=lambda i: target_x[i][0][1])[-answer_num:]
for index in sorted_min_x_list: # reorder the possible target area according to its min x axis
min_x_reorderd_cutting_area_array.append([target_x[index][0],target_x[index][1],target_x[index][2]])
for index in sorted_max_x_list: # reorder the possible target area according to its max x axis
max_x_reorderd_cutting_area_array.append([target_x[index][0],target_x[index][1],target_x[index][2]])
if (Possible_target_area_array_combine[-answer_num][0] < 2*pixelCount_threshold_init) or (target_x[answer_num-1][2][0]>=int(scale*1.6/answer_num)):
# Either the smallest of the top answer_num areas is not a real target (so one of the
# remaining areas holds two overlapped chars; we can only detect a single overlap),
# or the widest area is too wide to recognize as one char.
to_seperate_mode = True
to_seperate_char_index_min_x = numpy.argmax(sorted_min_x_list)
to_seperate_char_index_max_x = numpy.argmax(sorted_max_x_list)
to_del_char_index_min_x = numpy.argmin(sorted_min_x_list)
to_del_char_index_max_x = numpy.argmin(sorted_max_x_list)
else: to_seperate_mode = False
if Possible_target_area_array_combine[-answer_num+1][0] < 2*pixelCount_threshold_init: # the two smallest candidates are not target areas; better not to make a decision
make_decision = False
else : make_decision = True
for image_index in range(answer_num): # create image for the cutting area
temp_min_x_reorderd_cutting_area = min_x_reorderd_cutting_area_array[image_index][1].copy()
temp_max_x_reorderd_cutting_area = max_x_reorderd_cutting_area_array[image_index][1].copy()
img_x_scale_min_x = min_x_reorderd_cutting_area_array[image_index][2][0]+3
img_y_scale_min_x = min_x_reorderd_cutting_area_array[image_index][2][1]+3
img_x_scale_max_x = max_x_reorderd_cutting_area_array[image_index][2][0]+3
img_y_scale_max_x = max_x_reorderd_cutting_area_array[image_index][2][1]+3
new_cutting_img_min_x = numpy.zeros([img_y_scale_min_x,img_x_scale_min_x,3],dtype=numpy.uint8)
new_cutting_img_min_x.fill(255)
new_cutting_img_max_x = numpy.zeros([img_y_scale_max_x,img_x_scale_max_x,3],dtype=numpy.uint8)
new_cutting_img_max_x.fill(255)
img_min_x[image_index] = Image.fromarray(new_cutting_img_min_x, 'RGB')
img_max_x[image_index] = Image.fromarray(new_cutting_img_max_x, 'RGB')
while (temp_min_x_reorderd_cutting_area):
(x,y) = temp_min_x_reorderd_cutting_area.pop()
img_min_x[image_index].putpixel((x,y), (0,0,0))
while (temp_max_x_reorderd_cutting_area):
(x,y) = temp_max_x_reorderd_cutting_area.pop()
img_max_x[image_index].putpixel((x,y), (0,0,0))
img_min_x[image_index].save("min_x_cut_image_"+str(image_index)+".png")
img_max_x[image_index].save("max_x_cut_image_"+str(image_index)+".png")
all_img_dir_min_x.append("min_x_cut_image_"+str(image_index)+".png")
all_img_dir_max_x.append("max_x_cut_image_"+str(image_index)+".png")
reshape_min_x_img = concat_images.concat_images(all_img_dir_min_x)
reshape_min_x_img.save("combine_min_x_cut_image.png")
reshape_max_x_img = concat_images.concat_images(all_img_dir_max_x)
reshape_max_x_img.save("combine_max_x_cut_image.png")
temp_text_final_mode6_min_x = pytesseract.image_to_string(reshape_min_x_img,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text. Use outputbase digits_and_letters
#temp_text_final_mode6 = temp_text_final_mode6.replace(" ", "") # delete white space
text_final_mode6_min_x.append(temp_text_final_mode6_min_x)
text_final_min_x.append(temp_text_final_mode6_min_x)
temp_text_final_mode7_min_x = pytesseract.image_to_string(reshape_min_x_img,config='-psm 7 outputbase digits_and_letters') #-psm 7 = Treat the image as a single text line.
#temp_text_final_mode7 = temp_text_final_mode7.replace(" ", "") # delete white space
text_final_mode7_min_x .append(temp_text_final_mode7_min_x )
text_final_min_x .append(temp_text_final_mode7_min_x )
temp_text_final_mode8_min_x = pytesseract.image_to_string(reshape_min_x_img,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
#temp_text_final_mode8 = temp_text_final_mode8.replace(" ", "") # delete white space
text_final_mode8_min_x .append(temp_text_final_mode8_min_x )
text_final_min_x .append(temp_text_final_mode8_min_x )
temp_text_final_mode6_max_x = pytesseract.image_to_string(reshape_max_x_img,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text. Use outputbase digits_and_letters
#temp_text_final_mode6 = temp_text_final_mode6.replace(" ", "") # delete white space
text_final_mode6_max_x.append(temp_text_final_mode6_max_x)
text_final_max_x.append(temp_text_final_mode6_max_x)
temp_text_final_mode7_max_x = pytesseract.image_to_string(reshape_max_x_img,config='-psm 7 outputbase digits_and_letters') #-psm 7 = Treat the image as a single text line.
#temp_text_final_mode7 = temp_text_final_mode7.replace(" ", "") # delete white space
text_final_mode7_max_x .append(temp_text_final_mode7_max_x )
text_final_max_x .append(temp_text_final_mode7_max_x )
temp_text_final_mode8_max_x = pytesseract.image_to_string(reshape_max_x_img,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
#temp_text_final_mode8 = temp_text_final_mode8.replace(" ", "") # delete white space
text_final_mode8_max_x .append(temp_text_final_mode8_max_x )
text_final_max_x .append(temp_text_final_mode8_max_x )
print 'Final Result 6:'
print text_final_mode6_min_x
print text_final_mode6_max_x
print '\n'
print 'Final Result 7:'
print text_final_mode7_min_x
print text_final_mode7_max_x
print '\n'
print 'Final Result 8:'
print text_final_mode8_min_x
print text_final_mode8_max_x
print '\n'
text_final = text_final_mode6_min_x + text_final_mode6_max_x + text_final_mode7_min_x +text_final_mode7_max_x + text_final_mode8_min_x +text_final_mode8_max_x
for word_index in range (0,answer_num):
for answer_element in text_final:
answer_element = answer_element.replace(" ", "")
new_text_final[word_index] = answer_element
if len(answer_element)> word_index:
final_answer[word_index] = final_answer[word_index]+answer_element[word_index]
final_answer[word_index] = final_answer[word_index].encode('ascii')
final_answer[word_index] = final_answer[word_index].replace(" ", "") #delete white space
Uncut_final_answer[word_index] = Counter(final_answer[word_index]).most_common(3)
print 'Uncut Final Answer:'
print final_answer
print '\n'
###### Single overlapped char Splitting ########
text_final_char_array_min_x = [""for k in range(answer_num+2)]
text_final_char_array_max_x = [""for k in range(answer_num+2)]
image_index_min_x = 0
image_index_max_x = 0
new_text_final_char_array_min_x = [""for k in range(answer_num)]
new_text_final_char_array_max_x = [""for k in range(answer_num)]
for area_index in range (0,answer_num):
temp_char_min_x=""
temp_char_img_min_x = img_min_x[area_index].copy()
if to_seperate_mode and area_index==to_del_char_index_min_x: image_index_min_x=image_index_min_x+1 # delete non target char
elif to_seperate_mode and area_index==to_seperate_char_index_min_x:
cut_x_pix = cut_min_x_axis.cut_min_x_axis(temp_char_img_min_x,target_color)
temp_xsize, temp_ysize = temp_char_img_min_x.size
temp_img_left = temp_char_img_min_x.crop((0, 0, cut_x_pix, temp_ysize))
temp_img_right = temp_char_img_min_x.crop((cut_x_pix , 0, temp_xsize, temp_ysize))
char_6_min_x = pytesseract.image_to_string(temp_img_left,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text.
char_8_min_x = pytesseract.image_to_string(temp_img_left,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
char_9_min_x = pytesseract.image_to_string(temp_img_left,config='-psm 9 outputbase digits_and_letters') #-psm 9 = Treat the image as a single word in a circle.
char_10_min_x = pytesseract.image_to_string(temp_img_left,config='-psm 10 outputbase digits_and_letters') #-psm 10 = Treat the image as a single character.
temp_char_min_x = temp_char_min_x + char_6_min_x + char_8_min_x + char_9_min_x + char_10_min_x
text_final_char_array_min_x[image_index_min_x]= text_final_char_array_min_x[image_index_min_x] + temp_char_min_x
char_6_min_x = pytesseract.image_to_string(temp_img_right,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text.
char_8_min_x = pytesseract.image_to_string(temp_img_right,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
char_9_min_x = pytesseract.image_to_string(temp_img_right,config='-psm 9 outputbase digits_and_letters') #-psm 9 = Treat the image as a single word in a circle.
char_10_min_x = pytesseract.image_to_string(temp_img_right,config='-psm 10 outputbase digits_and_letters') #-psm 10 = Treat the image as a single character.
temp_char_min_x = temp_char_min_x + char_6_min_x + char_8_min_x + char_9_min_x + char_10_min_x
text_final_char_array_min_x[image_index_min_x+1]= text_final_char_array_min_x[image_index_min_x+1] + temp_char_min_x
image_index_min_x=image_index_min_x+2
else:
char_6_min_x = pytesseract.image_to_string(temp_char_img_min_x,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text.
char_8_min_x = pytesseract.image_to_string(temp_char_img_min_x,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
char_9_min_x = pytesseract.image_to_string(temp_char_img_min_x,config='-psm 9 outputbase digits_and_letters') #-psm 9 = Treat the image as a single word in a circle.
char_10_min_x = pytesseract.image_to_string(temp_char_img_min_x,config='-psm 10 outputbase digits_and_letters') #-psm 10 = Treat the image as a single character.
temp_char_min_x = temp_char_min_x + char_6_min_x + char_8_min_x + char_9_min_x + char_10_min_x
text_final_char_array_min_x[image_index_min_x]= text_final_char_array_min_x[image_index_min_x] + temp_char_min_x
image_index_min_x=image_index_min_x+1
for area_index in range (0,answer_num):
temp_char_max_x=""
temp_char_img_max_x = img_max_x[area_index].copy()
if to_seperate_mode and area_index==to_del_char_index_max_x: image_index_max_x=image_index_max_x+1 # delete non target char
elif to_seperate_mode and area_index==to_seperate_char_index_max_x:
cut_x_pix = cut_min_x_axis.cut_min_x_axis(temp_char_img_max_x,target_color)
temp_xsize, temp_ysize = temp_char_img_max_x.size
temp_img_left = temp_char_img_max_x.crop((0, 0, cut_x_pix, temp_ysize))
temp_img_right = temp_char_img_max_x.crop((cut_x_pix , 0, temp_xsize, temp_ysize))
char_6_max_x = pytesseract.image_to_string(temp_img_left,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text.
char_8_max_x = pytesseract.image_to_string(temp_img_left,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
char_9_max_x = pytesseract.image_to_string(temp_img_left,config='-psm 9 outputbase digits_and_letters') #-psm 9 = Treat the image as a single word in a circle.
char_10_max_x = pytesseract.image_to_string(temp_img_left,config='-psm 10 outputbase digits_and_letters') #-psm 10 = Treat the image as a single character.
temp_char_max_x = temp_char_max_x + char_6_max_x + char_8_max_x + char_9_max_x + char_10_max_x
text_final_char_array_max_x[image_index_max_x]= text_final_char_array_max_x[image_index_max_x] + temp_char_max_x
char_6_max_x = pytesseract.image_to_string(temp_img_right,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text.
char_8_max_x = pytesseract.image_to_string(temp_img_right,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
char_9_max_x = pytesseract.image_to_string(temp_img_right,config='-psm 9 outputbase digits_and_letters') #-psm 9 = Treat the image as a single word in a circle.
char_10_max_x = pytesseract.image_to_string(temp_img_right,config='-psm 10 outputbase digits_and_letters') #-psm 10 = Treat the image as a single character.
temp_char_max_x = temp_char_max_x + char_6_max_x + char_8_max_x + char_9_max_x + char_10_max_x
text_final_char_array_max_x[image_index_max_x+1]= text_final_char_array_max_x[image_index_max_x+1] + temp_char_max_x
image_index_max_x=image_index_max_x+2
else:
char_6_max_x = pytesseract.image_to_string(temp_char_img_max_x,config='-psm 6 outputbase digits_and_letters') #-psm 6 = Assume a single uniform block of text.
char_8_max_x = pytesseract.image_to_string(temp_char_img_max_x,config='-psm 8 outputbase digits_and_letters') #-psm 8 = Treat the image as a single word.
char_9_max_x = pytesseract.image_to_string(temp_char_img_max_x,config='-psm 9 outputbase digits_and_letters') #-psm 9 = Treat the image as a single word in a circle.
char_10_max_x = pytesseract.image_to_string(temp_char_img_max_x,config='-psm 10 outputbase digits_and_letters') #-psm 10 = Treat the image as a single character.
temp_char_max_x = temp_char_max_x + char_6_max_x + char_8_max_x + char_9_max_x + char_10_max_x
text_final_char_array_max_x[image_index_max_x]= text_final_char_array_max_x[image_index_max_x] + temp_char_max_x
image_index_max_x=image_index_max_x+1
while(text_final_char_array_min_x):
char=text_final_char_array_min_x.pop()
if char!="" and char!=" ": new_text_final_char_array_min_x.insert(0,char)
while(text_final_char_array_max_x):
char=text_final_char_array_max_x.pop()
if char!="" and char!=" ": new_text_final_char_array_max_x.insert(0,char)
########## OutPut ##########
if to_seperate_mode == True: print 'It is hard'
if make_decision==False: print 'Can I keep silent'
print '\n'
print 'Uncut Final Answer:'
print final_answer
print '\n'
print 'Cut Final Answer:'
print new_text_final_char_array_min_x
print new_text_final_char_array_max_x
print '\n'
########## Combine both entire-image and split-char results ##########
combine_array = [""for k in range(answer_num)]
combine_one_answer = [""for k in range(answer_num)]
weight_combine_array = [""for k in range(answer_num)]
weight_combine_one_answer = [""for k in range(answer_num)]
for index in range (0,answer_num):
combine_array[index] = final_answer[index] + new_text_final_char_array_min_x[index]+ new_text_final_char_array_max_x[index]
combine_one_answer[index] = Counter(combine_array[index]).most_common(3)
print 'Combine and select one answer:'
print combine_one_answer
print '\n'
for index in range (0,answer_num):
if combine_one_answer[index] ==[]: combine_one_answer.remove(combine_one_answer[index])
output = ""
for index in range (0,len(combine_one_answer)):
output = output + combine_one_answer[index][0][0]
print 'Answer:'
print output
print '\n'
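# A compact, vectorized version of the binary-map step above (illustrative
# only; `binarize` is a hypothetical helper, not used by this script).
# Pixels with any RGB channel below the threshold become black, the rest white.
def binarize(img, threshold=102):
    arr = numpy.asarray(img.convert('RGB'))
    dark = (arr < threshold).any(axis=2)  # True where any channel is dark
    out = numpy.where(dark[..., None], 0, 255).astype(numpy.uint8)
    return Image.fromarray(numpy.repeat(out, 3, axis=2), 'RGB')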
|
allan920693/Captcha-Solver-using-Pytesseract
|
Main.py
|
Python
|
mit
| 27,466
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Imports values from task 13 to test equality.
.. hint::
You can access task_12 data as in the following example:
.. code:: python
print task_12.FLOATVAL
"""
import task_12
FRAC_DEC_EQUAL = task_12.DECVAL == task_12.FRACVAL
DEC_FLOAT_INEQUAL = task_12.DECVAL != task_12.FLOATVAL
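# A quick illustration (Python 3 semantics; the values below are examples,
# not the actual task_12 constants): exact numeric types compare equal across
# Decimal and Fraction, while binary floats generally do not.
if __name__ == '__main__':
    from decimal import Decimal
    from fractions import Fraction
    print(Decimal('0.1') == Fraction(1, 10))  # True: both are exactly 1/10
    print(Fraction(1, 10) == 0.1)             # False: the float 0.1 is inexact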
|
johnnymango/is210-week-03-warmup
|
task_13.py
|
Python
|
mpl-2.0
| 343
|
# Copyright (c) 2013, 2014
# Jose Luis Cercos-Pita <jlcercos@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bge
from bge import logic as g
from mathutils import *
from math import *
# Get the owner (should be the camera)
cont = bge.logic.getCurrentController()
own = cont.owner
# Get the general scene
scene = g.getCurrentScene()
# And get the missions manager from the owner.
# The mission manager has several utilities like
# loading objects from other blender files
Manager = own['mission_manager']
# Useful global variables
loaded = False
player = None
allies = []
enemies = []
def update():
""" This method is called each frame. """
if not loaded:
return
def load():
global loaded, player, allies, enemies
# Clear old data
loaded = False
player = None
allies = []
enemies = []
# We will add the objects in the scene origin, and move later
objlist = scene.objects
origin = objlist.get('MainScene.Origin')
if not origin:
raise Exception("Can't find the object 'MainScene.Origin'")
print('Loading U48 player object...')
Manager.load_blender_file('Player/U48/U48.blend')
active = scene.objects
inactive = scene.objectsInactive
if (not 'Ship.Main.U48' in active) and (not 'Ship.Main.U48' in inactive):
print('FAIL! The file was not loaded')
return
if 'Ship.Main.U48' in inactive:
player = scene.addObject('Ship.Main.U48', origin)
else:
player = active['Ship.Main.U48']
player['team'] = 0
print('OK!')
print('Loading Liberty enemy object...')
Manager.load_blender_file('AI/Liberty/Liberty.blend')
active = scene.objects
inactive = scene.objectsInactive
if (not 'Ship.Main.Liberty' in active) and (not 'Ship.Main.Liberty' in inactive):
print('FAIL! The file was not loaded')
return
if 'Ship.Main.Liberty' in inactive:
enemies.append(scene.addObject('Ship.Main.Liberty', origin))
else:
print('Warning: The object is in an active layer. Only one instance can be used')
enemies.append(active['Ship.Main.Liberty'])
enemies[0]['team'] = 1
enemies[0].worldPosition = Vector((0.0, 2000.0, 0.0))
# Damage the ship
enemies[0].worldPosition.z -= 12
enemies[0].worldOrientation = Euler((-radians(15.0), -radians(3.0), 0.0), 'XYZ')
enemies[0]['floating'] = 0.25
enemies[0]['angles'] = [-radians(15.0), -radians(3.0)]
enemies[0]['propulsion'] = 0.0
print('Loading bouy object...')
Manager.load_blender_file('AI/Bouy/Bouy.blend')
active = scene.objects
inactive = scene.objectsInactive
if (not 'Bouy' in active) and (not 'Bouy' in inactive):
print('FAIL! The file was not loaded')
return
if 'Bouy' in inactive:
obj = scene.addObject('Bouy', origin)
else:
print('Warning: The object is in an active layer. Only one instance can be used')
obj = active['Bouy']
obj.worldPosition = Vector((-190.0, 1900.0, 0.0))
print('OK!')
loaded = True
|
sanguinariojoe/sonsilentsea
|
resources/Campaigns/aTraining/aHurtWhale/__init__.py
|
Python
|
gpl-3.0
| 3,446
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersOperations(object):
"""UsersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UserList"]
"""Gets all the users registered on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param expand: Specify $expand=details to populate additional fields related to the resource, or
specify $skipToken=:code:`<token>` to populate the next page in the list.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UserList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01.models.UserList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UserList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users'} # type: ignore

    def get(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.User"
        """Gets the properties of the specified user.

        :param device_name: The device name.
        :type device_name: str
        :param name: The user name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: User, or the result of cls(response)
        :rtype: ~azure.mgmt.databoxedge.v2020_09_01.models.User
        :raises: ~azure.core.exceptions.HttpResponseError
        """
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
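    # Usage sketch (illustrative; ``client`` and the resource names are
    # assumptions, not defined in this file):
    #
    #     user = client.users.get(
    #         device_name="testedgedevice",
    #         name="user1",
    #         resource_group_name="GroupForEdgeAutomation",
    #     )
    #     print(user.name)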

    def _create_or_update_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
user, # type: "_models.User"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.User"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.User"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(user, 'User')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore

    def begin_create_or_update(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        user,  # type: "_models.User"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.User"]
        """Creates a new user or updates an existing user's information on a Data Box Edge/Data Box
        Gateway device.

        :param device_name: The device name.
        :type device_name: str
        :param name: The user name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param user: The user details.
        :type user: ~azure.mgmt.databoxedge.v2020_09_01.models.User
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations
         if no Retry-After header is present.
        :return: An instance of LROPoller that returns either User or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2020_09_01.models.User]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
user=user,
                cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
deserialized = self._deserialize('User', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
        if polling is True:
            polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False:
            polling_method = NoPolling()
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
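    # Usage sketch (illustrative; ``client`` and ``user_body`` are assumptions,
    # not defined in this file). ``begin_create_or_update`` returns an
    # LROPoller immediately; call ``result()`` to block until the operation
    # completes, or save ``poller.continuation_token()`` and resume later via
    # the ``continuation_token`` keyword documented above:
    #
    #     poller = client.users.begin_create_or_update(
    #         device_name="testedgedevice",
    #         name="user1",
    #         resource_group_name="GroupForEdgeAutomation",
    #         user=user_body,
    #     )
    #     created_user = poller.result()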

    def _delete_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore

    def begin_delete(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the user on a Data Box Edge/Data Box Gateway device.

        :param device_name: The device name.
        :type device_name: str
        :param name: The user name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations
         if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
                cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
        if polling is True:
            polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False:
            polling_method = NoPolling()
        else:
            polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users/{name}'} # type: ignore
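    # Usage sketch (illustrative; ``client`` is an assumption). ``wait()``
    # blocks until the delete operation finishes; it returns no value:
    #
    #     client.users.begin_delete(
    #         device_name="testedgedevice",
    #         name="user1",
    #         resource_group_name="GroupForEdgeAutomation",
    #     ).wait()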
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_09_01/operations/_users_operations.py | Python | mit | 21,208
from application import app

# Application config
app.config.update(dict(
    DEBUG=True,
    SECRET_KEY='development key',
))
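# A minimal sketch of the same configuration without a hard-coded secret:
# read SECRET_KEY from the environment, falling back to the development
# value. The environment-variable name is an assumption, not part of the
# original file.
import os

app.config.update(
    DEBUG=True,
    SECRET_KEY=os.environ.get('SECRET_KEY', 'development key'),
)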
japeto/Tahoe-Motores | application/config.py | Python | gpl-3.0 | 134